author     Panu Matilainen <pmatilai@redhat.com>    2007-07-16 16:48:14 +0300
committer  Panu Matilainen <pmatilai@redhat.com>    2007-07-16 16:48:14 +0300
commit     2cfd3012bfcb5c5c61bbaf662ef084e0ab789d79 (patch)
tree       e12ee52087506ac8c7a5eee83b17497d98df2d40 /db/lock
parent     b754fe19fd387ca5fe8e7c00ddaa25c898fa192f (diff)
Update internal BDB to version 4.5.20
Diffstat (limited to 'db/lock')
-rw-r--r--  db/lock/Design             2
-rw-r--r--  db/lock/lock.c           413
-rw-r--r--  db/lock/lock_deadlock.c  232
-rw-r--r--  db/lock/lock_failchk.c    95
-rw-r--r--  db/lock/lock_id.c        135
-rw-r--r--  db/lock/lock_list.c       37
-rw-r--r--  db/lock/lock_method.c    191
-rw-r--r--  db/lock/lock_region.c    172
-rw-r--r--  db/lock/lock_stat.c      170
-rw-r--r--  db/lock/lock_timer.c      26
-rw-r--r--  db/lock/lock_util.c       63
11 files changed, 717 insertions, 819 deletions
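
The hunks below are the imported Berkeley DB 4.5.20 sources, reproduced verbatim. For orientation, the following standalone sketch (not part of this commit; the environment home and object name are placeholders) exercises the public DB_ENV locking calls whose internals these files implement: lock_id and lock_id_free (lock_id.c), lock_get and lock_put (lock.c), and the deadlock-detection policy set with set_lk_detect (lock_deadlock.c).

/*
 * Illustrative sketch, not part of the commit: drive the public locking
 * API backed by db/lock.  "/tmp/bdb-lock-demo" and "my-resource" are
 * placeholders; error handling is minimal.
 */
#include <string.h>
#include <db.h>

int
main(void)
{
	DB_ENV *dbenv;
	DB_LOCK lock;
	DBT obj;
	u_int32_t locker;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (1);
	/* Run the deadlock detector on every conflict (lock_deadlock.c). */
	(void)dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT);
	if ((ret = dbenv->open(dbenv, "/tmp/bdb-lock-demo",
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0)
		goto err;

	/* Allocate a locker id (lock_id.c). */
	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		goto err;

	/* Take and release a write lock on a named object (lock.c). */
	memset(&obj, 0, sizeof(obj));
	obj.data = "my-resource";
	obj.size = (u_int32_t)strlen("my-resource");
	if ((ret = dbenv->lock_get(dbenv,
	    locker, 0, &obj, DB_LOCK_WRITE, &lock)) == 0)
		ret = dbenv->lock_put(dbenv, &lock);

	(void)dbenv->lock_id_free(dbenv, locker);
err:	(void)dbenv->close(dbenv, 0);
	return (ret == 0 ? 0 : 1);
}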
diff --git a/db/lock/Design b/db/lock/Design
index f0bb5c6e9..0fcdca2a2 100644
--- a/db/lock/Design
+++ b/db/lock/Design
@@ -1,4 +1,4 @@
-# $Id: Design,v 11.5 2002/02/01 19:07:18 bostic Exp $
+# $Id: Design,v 12.0 2004/11/17 03:44:06 bostic Exp $
Synchronization in the Locking Subsystem
diff --git a/db/lock/lock.c b/db/lock/lock.c
index 2b4f63e0d..03cd11831 100644
--- a/db/lock/lock.c
+++ b/db/lock/lock.c
@@ -1,22 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock.c,v 11.167 2004/10/15 16:59:41 bostic Exp $
+ * $Id: lock.c,v 12.30 2006/08/24 14:46:10 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
@@ -29,7 +22,7 @@ static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
static int __lock_put_internal __P((DB_LOCKTAB *,
struct __db_lock *, u_int32_t, u_int32_t));
static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
-static void __lock_remove_waiter __P((DB_LOCKTAB *,
+static int __lock_remove_waiter __P((DB_LOCKTAB *,
DB_LOCKOBJ *, struct __db_lock *, db_status_t));
static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));
@@ -50,7 +43,8 @@ __lock_vec_pp(dbenv, locker, flags, list, nlist, elistp)
int nlist;
DB_LOCKREQ *list, **elistp;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
@@ -61,12 +55,10 @@ __lock_vec_pp(dbenv, locker, flags, list, nlist, elistp)
"DB_ENV->lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
return (ret);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_vec(dbenv, locker, flags, list, nlist, elistp);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv,
+ (__lock_vec(dbenv, locker, flags, list, nlist, elistp)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -108,7 +100,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
region = lt->reginfo.primary;
run_dd = 0;
- LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ LOCK_SYSTEM_LOCK(dbenv);
for (i = 0, ret = 0; i < nlist && ret == 0; i++)
switch (list[i].op) {
case DB_LOCK_GET_TIMEOUT:
@@ -119,7 +111,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
LOCK_INIT(list[i].lock);
break;
}
- ret = __lock_get_internal(dbenv->lk_handle,
+ ret = __lock_get_internal(lt,
locker, flags, list[i].obj,
list[i].mode, list[i].timeout, &list[i].lock);
break;
@@ -190,7 +182,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
locker_links, __db_lock);
if (writes == 1 ||
lp->mode == DB_LOCK_READ ||
- lp->mode == DB_LOCK_DIRTY) {
+ lp->mode == DB_LOCK_READ_UNCOMMITTED) {
SH_LIST_REMOVE(lp,
locker_links, __db_lock);
sh_obj = (DB_LOCKOBJ *)
@@ -211,8 +203,8 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
continue;
}
if (objlist != NULL) {
- DB_ASSERT((char *)np <
- (char *)objlist->data +
+ DB_ASSERT(dbenv, (u_int8_t *)np <
+ (u_int8_t *)objlist->data +
objlist->size);
np->data = SH_DBT_PTR(&sh_obj->lockobj);
np->size = sh_obj->lockobj.size;
@@ -230,11 +222,8 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
case DB_LOCK_UPGRADE_WRITE:
if (upgrade != 1)
goto up_done;
- for (lp = SH_LIST_FIRST(
- &sh_locker->heldby, __db_lock);
- lp != NULL;
- lp = SH_LIST_NEXT(lp,
- locker_links, __db_lock)) {
+ SH_LIST_FOREACH(lp, &sh_locker->heldby,
+ locker_links, __db_lock) {
if (lp->mode != DB_LOCK_WWRITE)
continue;
lock.off = R_OFFSET(&lt->reginfo, lp);
@@ -325,15 +314,13 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
F_ISSET(sh_locker, DB_LOCKER_DELETED))
break;
- for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
- lp != NULL;
- lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
+ SH_LIST_FOREACH(
+ lp, &sh_locker->heldby, locker_links, __db_lock)
__lock_printlock(lt, NULL, lp, 1);
- }
break;
#endif
default:
- __db_err(dbenv,
+ __db_errx(dbenv,
"Invalid lock operation: %d", list[i].op);
ret = EINVAL;
break;
@@ -342,7 +329,7 @@ __lock_vec(dbenv, locker, flags, list, nlist, elistp)
if (ret == 0 && region->detect != DB_LOCK_NORUN &&
(region->need_dd || LOCK_TIME_ISVALID(&region->next_timeout)))
run_dd = 1;
- UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ LOCK_SYSTEM_UNLOCK(dbenv);
if (run_dd)
(void)__lock_detect(dbenv, region->detect, &did_abort);
@@ -368,7 +355,8 @@ __lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock)
db_lockmode_t lock_mode;
DB_LOCK *lock;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
@@ -379,12 +367,10 @@ __lock_get_pp(dbenv, locker, flags, obj, lock_mode, lock)
DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
return (ret);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_get(dbenv, locker, flags, obj, lock_mode, lock);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv,
+ (__lock_get(dbenv, locker, flags, obj, lock_mode, lock)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -403,17 +389,19 @@ __lock_get(dbenv, locker, flags, obj, lock_mode, lock)
db_lockmode_t lock_mode;
DB_LOCK *lock;
{
+ DB_LOCKTAB *lt;
int ret;
+ lt = dbenv->lk_handle;
+
if (IS_RECOVERING(dbenv)) {
LOCK_INIT(*lock);
return (0);
}
- LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
- ret = __lock_get_internal(dbenv->lk_handle,
- locker, flags, obj, lock_mode, 0, lock);
- UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ LOCK_SYSTEM_LOCK(dbenv);
+ ret = __lock_get_internal(lt, locker, flags, obj, lock_mode, 0, lock);
+ LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -434,11 +422,12 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
db_timeout_t timeout;
DB_LOCK *lock;
{
- struct __db_lock *newl, *lp, *wwrite;
+ struct __db_lock *newl, *lp;
DB_ENV *dbenv;
DB_LOCKER *sh_locker;
DB_LOCKOBJ *sh_obj;
DB_LOCKREGION *region;
+ DB_THREAD_INFO *ip;
u_int32_t holder, locker_ndx, obj_ndx;
int did_abort, ihold, grant_dirty, no_dd, ret, t_ret;
@@ -463,24 +452,21 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
no_dd = ret = 0;
newl = NULL;
-
- /*
- * If we are not going to reuse this lock, invalidate it
- * so that if we fail it will not look like a valid lock.
- */
- if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
- LOCK_INIT(*lock);
+ sh_obj = NULL;
/* Check that the lock mode is valid. */
if (lock_mode >= (db_lockmode_t)region->stat.st_nmodes) {
- __db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
+ __db_errx(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
(u_long)lock_mode);
return (EINVAL);
}
- region->stat.st_nrequests++;
+ if (LF_ISSET(DB_LOCK_UPGRADE))
+ region->stat.st_nupgrade++;
+ else if (!LF_ISSET(DB_LOCK_SWITCH))
+ region->stat.st_nrequests++;
if (obj == NULL) {
- DB_ASSERT(LOCK_ISSET(*lock));
+ DB_ASSERT(dbenv, LOCK_ISSET(*lock));
lp = R_ADDR(&lt->reginfo, lock->off);
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
} else {
@@ -493,17 +479,11 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
/* Get the locker, we may need it to find our parent. */
LOCKER_LOCK(lt, region, locker, locker_ndx);
if ((ret = __lock_getlocker(lt, locker,
- locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) {
- /*
- * XXX
- * We cannot tell if we created the object or not, so we don't
- * kow if we should free it or not.
- */
+ locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0)
goto err;
- }
if (sh_locker == NULL) {
- __db_err(dbenv, "Locker does not exist");
+ __db_errx(dbenv, "Locker does not exist");
ret = EINVAL;
goto err;
}
@@ -534,7 +514,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
ihold = 0;
grant_dirty = 0;
holder = 0;
- wwrite = NULL;
/*
* SWITCH is a special case, used by the queue access method
@@ -548,6 +527,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
else
lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ DB_ASSERT(dbenv, lp->status != DB_LSTAT_FREE);
if (locker == lp->holder) {
if (lp->mode == lock_mode &&
lp->status == DB_LSTAT_HELD) {
@@ -568,9 +548,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
goto done;
} else {
ihold = 1;
- if (lock_mode == DB_LOCK_WRITE &&
- lp->mode == DB_LOCK_WWRITE)
- wwrite = lp;
}
} else if (__lock_is_parent(lt, lp->holder, sh_locker))
ihold = 1;
@@ -583,17 +560,15 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
}
}
- /* If we want a write lock and we have a was write, upgrade. */
- if (wwrite != NULL)
- LF_SET(DB_LOCK_UPGRADE);
-
/*
- * If there are conflicting holders we will have to wait. An upgrade
- * or dirty reader goes to the head of the queue, everyone else to the
- * back.
+ * If there are conflicting holders we will have to wait. If we
+ * already hold a lock on this object or are doing an upgrade or
+ * this is a dirty reader it goes to the head of the queue, everyone
+ * else to the back.
*/
if (lp != NULL) {
- if (LF_ISSET(DB_LOCK_UPGRADE) || lock_mode == DB_LOCK_DIRTY)
+ if (ihold || LF_ISSET(DB_LOCK_UPGRADE) ||
+ lock_mode == DB_LOCK_READ_UNCOMMITTED)
action = HEAD;
else
action = TAIL;
@@ -608,13 +583,12 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
/*
* Look for conflicting waiters.
*/
- for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
- lp != NULL;
- lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ SH_TAILQ_FOREACH(
+ lp, &sh_obj->waiters, links, __db_lock)
if (CONFLICTS(lt, region, lp->mode,
lock_mode) && locker != lp->holder)
break;
- }
+
/*
* If there are no conflicting holders or waiters,
* then we grant. Normally when we wait, we
@@ -646,7 +620,8 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
*/
if (lp == NULL)
action = GRANT;
- else if (lock_mode == DB_LOCK_DIRTY && grant_dirty) {
+ else if (grant_dirty &&
+ lock_mode == DB_LOCK_READ_UNCOMMITTED) {
/*
* An upgrade will be at the head of the
* queue.
@@ -658,7 +633,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
action = SECOND;
else
action = GRANT;
- } else if (lock_mode == DB_LOCK_DIRTY)
+ } else if (lock_mode == DB_LOCK_READ_UNCOMMITTED)
action = SECOND;
else
action = TAIL;
@@ -672,14 +647,33 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
case GRANT:
/* Allocate a new lock. */
if ((newl =
- SH_TAILQ_FIRST(&region->free_locks, __db_lock)) == NULL)
- return (__lock_nomem(dbenv, "locks"));
+ SH_TAILQ_FIRST(&region->free_locks, __db_lock)) == NULL) {
+ ret = __lock_nomem(dbenv, "locks");
+ goto err;
+ }
SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);
/* Update new lock statistics. */
if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
region->stat.st_maxnlocks = region->stat.st_nlocks;
+ /*
+ * Allocate a mutex if we do not have a mutex backing the lock.
+ *
+ * Use the lock mutex to block the thread; lock the mutex
+ * when it is allocated so that we will block when we try
+ * to lock it again. We will wake up when another thread
+ * grants the lock and releases the mutex. We leave it
+ * locked for the next use of this lock object.
+ */
+ if (newl->mtx_lock == MUTEX_INVALID) {
+ if ((ret = __mutex_alloc(dbenv, MTX_LOGICAL_LOCK,
+ DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_SELF_BLOCK,
+ &newl->mtx_lock)) != 0)
+ goto err;
+ MUTEX_LOCK(dbenv, newl->mtx_lock);
+ }
+
newl->holder = locker;
newl->refcount = 1;
newl->mode = lock_mode;
@@ -700,15 +694,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
break;
case UPGRADE:
-upgrade: if (wwrite != NULL) {
- lp = wwrite;
- lp->refcount++;
- lock->off = R_OFFSET(&lt->reginfo, lp);
- lock->gen = lp->gen;
- lock->mode = lock_mode;
- }
- else
- lp = R_ADDR(&lt->reginfo, lock->off);
+upgrade: lp = R_ADDR(&lt->reginfo, lock->off);
if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
sh_locker->nwrites++;
lp->mode = lock_mode;
@@ -717,7 +703,7 @@ upgrade: if (wwrite != NULL) {
switch (action) {
case UPGRADE:
- DB_ASSERT(0);
+ DB_ASSERT(dbenv, 0);
break;
case GRANT:
newl->status = DB_LSTAT_HELD;
@@ -728,7 +714,7 @@ upgrade: if (wwrite != NULL) {
case SECOND:
if (LF_ISSET(DB_LOCK_NOWAIT)) {
ret = DB_LOCK_NOTGRANTED;
- region->stat.st_nnowaits++;
+ region->stat.st_lock_nowait++;
goto err;
}
if ((lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) == NULL)
@@ -747,26 +733,19 @@ upgrade: if (wwrite != NULL) {
SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
break;
default:
- DB_ASSERT(0);
+ DB_ASSERT(dbenv, 0);
}
/* If we are switching drop the lock we had. */
if (LF_ISSET(DB_LOCK_SWITCH) &&
(ret = __lock_put_nolock(dbenv,
lock, &ihold, DB_LOCK_NOWAITERS)) != 0) {
- __lock_remove_waiter(lt, sh_obj, newl, DB_LSTAT_FREE);
+ (void)__lock_remove_waiter(
+ lt, sh_obj, newl, DB_LSTAT_FREE);
goto err;
}
/*
- * This is really a blocker for the thread. It should be
- * initialized locked, so that when we try to acquire it, we
- * block.
- */
- newl->status = DB_LSTAT_WAITING;
- region->stat.st_nconflicts++;
- region->need_dd = 1;
- /*
* First check to see if this txn has expired.
* If not then see if the lock timeout is past
* the expiration of the txn, if it is, use
@@ -808,17 +787,29 @@ upgrade: if (wwrite != NULL) {
LOCK_TIME_GREATER(
&region->next_timeout, &sh_locker->lk_expire)))
region->next_timeout = sh_locker->lk_expire;
- UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ newl->status = DB_LSTAT_WAITING;
+ region->stat.st_lock_wait++;
+ /* We are about to block, deadlock detector must run. */
+ region->need_dd = 1;
+
+ LOCK_SYSTEM_UNLOCK(dbenv);
/*
- * We are about to wait; before waiting, see if the deadlock
- * detector should be run.
+ * Before waiting, see if the deadlock detector should run.
*/
if (region->detect != DB_LOCK_NORUN && !no_dd)
(void)__lock_detect(dbenv, region->detect, &did_abort);
- MUTEX_LOCK(dbenv, &newl->mutex);
- LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ ip = NULL;
+ if (dbenv->thr_hashtab != NULL &&
+ (ret = __env_set_state(dbenv, &ip, THREAD_BLOCKED)) != 0)
+ goto err;
+ MUTEX_LOCK(dbenv, newl->mtx_lock);
+ if (ip != NULL)
+ ip->dbth_state = THREAD_ACTIVE;
+
+ LOCK_SYSTEM_LOCK(dbenv);
/* Turn off lock timeout. */
if (newl->status != DB_LSTAT_EXPIRED)
@@ -828,20 +819,20 @@ upgrade: if (wwrite != NULL) {
case DB_LSTAT_ABORTED:
ret = DB_LOCK_DEADLOCK;
goto err;
- case DB_LSTAT_NOTEXIST:
- ret = DB_LOCK_NOTEXIST;
- goto err;
case DB_LSTAT_EXPIRED:
expired: SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
- if ((ret = __lock_put_internal(lt, newl,
- obj_ndx, DB_LOCK_UNLINK | DB_LOCK_FREE)) != 0)
- break;
+ ret = __lock_put_internal(lt, newl,
+ obj_ndx, DB_LOCK_UNLINK | DB_LOCK_FREE);
+ newl = NULL;
+ if (ret != 0)
+ goto err;
if (LOCK_TIME_EQUAL(
&sh_locker->lk_expire, &sh_locker->tx_expire))
region->stat.st_ntxntimeouts++;
else
region->stat.st_nlocktimeouts++;
- return (DB_LOCK_NOTGRANTED);
+ ret = DB_LOCK_NOTGRANTED;
+ goto err;
case DB_LSTAT_PENDING:
if (LF_ISSET(DB_LOCK_UPGRADE)) {
/*
@@ -865,7 +856,7 @@ expired: SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
case DB_LSTAT_HELD:
case DB_LSTAT_WAITING:
default:
- __db_err(dbenv,
+ __db_errx(dbenv,
"Unexpected lock status: %d", (int)newl->status);
ret = __db_panic(dbenv, EINVAL);
goto err;
@@ -876,18 +867,22 @@ expired: SHOBJECT_LOCK(lt, region, sh_obj, obj_ndx);
lock->gen = newl->gen;
lock->mode = newl->mode;
sh_locker->nlocks++;
- if (IS_WRITELOCK(newl->mode))
+ if (IS_WRITELOCK(newl->mode)) {
sh_locker->nwrites++;
+ if (newl->mode == DB_LOCK_WWRITE)
+ F_SET(sh_locker, DB_LOCKER_DIRTY);
+ }
return (0);
-done:
- ret = 0;
-err:
- if (newl != NULL &&
+err: if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
+ LOCK_INIT(*lock);
+
+done: if (newl != NULL &&
(t_ret = __lock_freelock(lt, newl, locker,
DB_LOCK_FREE | DB_LOCK_UNLINK)) != 0 && ret == 0)
ret = t_ret;
+
return (ret);
}
@@ -902,32 +897,29 @@ __lock_put_pp(dbenv, lock)
DB_ENV *dbenv;
DB_LOCK *lock;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_put(dbenv, lock, 0);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_put(dbenv, lock)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
/*
* __lock_put --
*
- * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *, u_int32_t));
+ * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *));
* Internal lock_put interface.
*/
int
-__lock_put(dbenv, lock, flags)
+__lock_put(dbenv, lock)
DB_ENV *dbenv;
DB_LOCK *lock;
- u_int32_t flags;
{
DB_LOCKTAB *lt;
int ret, run_dd;
@@ -937,9 +929,9 @@ __lock_put(dbenv, lock, flags)
lt = dbenv->lk_handle;
- LOCKREGION(dbenv, lt);
- ret = __lock_put_nolock(dbenv, lock, &run_dd, flags);
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
+ ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
+ LOCK_SYSTEM_UNLOCK(dbenv);
/*
* Only run the lock detector if put told us to AND we are running
@@ -974,20 +966,13 @@ __lock_put_nolock(dbenv, lock, runp, flags)
lockp = R_ADDR(&lt->reginfo, lock->off);
if (lock->gen != lockp->gen) {
- __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
+ __db_errx(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
LOCK_INIT(*lock);
return (EINVAL);
}
- if (LF_ISSET(DB_LOCK_DOWNGRADE) &&
- lock->mode == DB_LOCK_WRITE && lockp->refcount > 1) {
- ret = __lock_downgrade(dbenv,
- lock, DB_LOCK_WWRITE, DB_LOCK_NOREGION);
- if (ret == 0)
- lockp->refcount--;
- } else
- ret = __lock_put_internal(lt,
- lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
+ ret = __lock_put_internal(lt,
+ lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
LOCK_INIT(*lock);
*runp = 0;
@@ -1036,11 +1021,13 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
region = lt->reginfo.primary;
if (!LF_ISSET(DB_LOCK_NOREGION))
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
+
+ region->stat.st_ndowngrade++;
lockp = R_ADDR(&lt->reginfo, lock->off);
if (lock->gen != lockp->gen) {
- __db_err(dbenv, __db_lock_invalid, "lock_downgrade");
+ __db_errx(dbenv, __db_lock_invalid, "lock_downgrade");
ret = EINVAL;
goto out;
}
@@ -1051,24 +1038,21 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
if (ret == 0)
ret = EINVAL;
- __db_err(dbenv, __db_locker_invalid);
+ __db_errx(dbenv, __db_locker_invalid);
goto out;
}
if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
sh_locker->nwrites--;
- if (new_mode == DB_LOCK_WWRITE)
- F_SET(sh_locker, DB_LOCKER_DIRTY);
-
lockp->mode = new_mode;
lock->mode = new_mode;
/* Get the object associated with this lock. */
obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
- (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
+ ret = __lock_promote(lt, obj, NULL, LF_ISSET(DB_LOCK_NOWAITERS));
out: if (!LF_ISSET(DB_LOCK_NOREGION))
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -1113,10 +1097,18 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
/* Get the object associated with this lock. */
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
- /* Remove this lock from its holders/waitlist. */
- if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
- __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
- else {
+ /*
+ * Remove this lock from its holders/waitlist. Set its status
+ * to ABORTED. It may get freed below, but if not then the
+ * waiter has been aborted (it will panic if the lock is
+ * free).
+ */
+ if (lockp->status != DB_LSTAT_HELD &&
+ lockp->status != DB_LSTAT_PENDING) {
+ if ((ret = __lock_remove_waiter(
+ lt, sh_obj, lockp, DB_LSTAT_ABORTED)) != 0)
+ return (ret);
+ } else {
SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
lockp->links.stqe_prev = -1;
}
@@ -1124,14 +1116,15 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
if (LF_ISSET(DB_LOCK_NOPROMOTE))
state_changed = 0;
else
- state_changed = __lock_promote(lt,
- sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));
+ if ((ret = __lock_promote(lt, sh_obj, &state_changed,
+ LF_ISSET(DB_LOCK_NOWAITERS))) != 0)
+ return (ret);
/* Check if object should be reclaimed. */
if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
- HASHREMOVE_EL(lt->obj_tab,
- obj_ndx, __db_lockobj, links, sh_obj);
+ SH_TAILQ_REMOVE(
+ &lt->obj_tab[obj_ndx], sh_obj, links, __db_lockobj);
if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
__db_shalloc_free(&lt->reginfo,
SH_DBT_PTR(&sh_obj->lockobj));
@@ -1174,16 +1167,13 @@ __lock_freelock(lt, lockp, locker, flags)
dbenv = lt->dbenv;
region = lt->reginfo.primary;
- ret = 0;
if (LF_ISSET(DB_LOCK_UNLINK)) {
LOCKER_LOCK(lt, region, locker, indx);
if ((ret = __lock_getlocker(lt,
locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
- if (ret == 0)
- ret = EINVAL;
- __db_err(dbenv, __db_locker_invalid);
- return (ret);
+ __db_errx(dbenv, __db_locker_invalid);
+ return (ret == 0 ? EINVAL : ret);
}
SH_LIST_REMOVE(lockp, locker_links, __db_lock);
@@ -1195,13 +1185,23 @@ __lock_freelock(lt, lockp, locker, flags)
}
if (LF_ISSET(DB_LOCK_FREE)) {
+ /*
+ * If the lock is not held we cannot be sure of its mutex
+ * state so we just destroy it and let it be re-created
+ * when needed.
+ */
+ if (lockp->mtx_lock != MUTEX_INVALID &&
+ lockp->status != DB_LSTAT_HELD &&
+ lockp->status != DB_LSTAT_EXPIRED &&
+ (ret = __mutex_free(dbenv, &lockp->mtx_lock)) != 0)
+ return (ret);
lockp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(
&region->free_locks, lockp, links, __db_lock);
region->stat.st_nlocks--;
}
- return (ret);
+ return (0);
}
/*
@@ -1230,8 +1230,11 @@ __lock_getobj(lt, obj, ndx, create, retp)
region = lt->reginfo.primary;
/* Look up the object in the hash table. */
- HASHLOOKUP(lt->obj_tab,
- ndx, __db_lockobj, links, obj, sh_obj, __lock_cmp);
+ SH_TAILQ_FOREACH(sh_obj, &lt->obj_tab[ndx], links, __db_lockobj)
+ if (obj->size == sh_obj->lockobj.size &&
+ memcmp(obj->data,
+ SH_DBT_PTR(&sh_obj->lockobj), obj->size) == 0)
+ break;
/*
* If we found the object, then we can just return it. If
@@ -1253,7 +1256,7 @@ __lock_getobj(lt, obj, ndx, create, retp)
p = sh_obj->objdata;
else if ((ret =
__db_shalloc(&lt->reginfo, obj->size, 0, &p)) != 0) {
- __db_err(dbenv, "No space for lock object storage");
+ __db_errx(dbenv, "No space for lock object storage");
goto err;
}
@@ -1269,8 +1272,8 @@ __lock_getobj(lt, obj, ndx, create, retp)
sh_obj->lockobj.size = obj->size;
sh_obj->lockobj.off =
(roff_t)SH_PTR_TO_OFF(&sh_obj->lockobj, p);
-
- HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj);
+ SH_TAILQ_INSERT_HEAD(
+ &lt->obj_tab[ndx], sh_obj, links, __db_lockobj);
}
*retp = sh_obj;
@@ -1330,7 +1333,7 @@ __lock_locker_is_parent(dbenv, locker, child, retp)
LOCKER_LOCK(lt, region, child, locker_ndx);
if ((ret =
__lock_getlocker(lt, child, locker_ndx, 0, &sh_locker)) != 0) {
- __db_err(dbenv, __db_locker_invalid);
+ __db_errx(dbenv, __db_locker_invalid);
return (ret);
}
@@ -1381,15 +1384,14 @@ __lock_inherit_locks(lt, locker, flags)
F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
if (ret == 0 && sh_locker != NULL)
ret = EINVAL;
- __db_err(dbenv, __db_locker_invalid);
- goto err;
+ __db_errx(dbenv, __db_locker_invalid);
+ return (ret);
}
/* Make sure we are a child transaction. */
if (sh_locker->parent_locker == INVALID_ROFF) {
- __db_err(dbenv, "Not a child transaction");
- ret = EINVAL;
- goto err;
+ __db_errx(dbenv, "Not a child transaction");
+ return (EINVAL);
}
sh_parent = R_ADDR(&lt->reginfo, sh_locker->parent_locker);
F_SET(sh_locker, DB_LOCKER_DELETED);
@@ -1401,11 +1403,11 @@ __lock_inherit_locks(lt, locker, flags)
LOCKER_LOCK(lt, region, locker, ndx);
if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
if (ret == 0) {
- __db_err(dbenv,
+ __db_errx(dbenv,
"Parent locker is not valid");
ret = EINVAL;
}
- goto err;
+ return (ret);
}
/*
@@ -1421,9 +1423,7 @@ __lock_inherit_locks(lt, locker, flags)
/* See if the parent already has a lock. */
obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
- for (hlp = SH_TAILQ_FIRST(&obj->holders, __db_lock);
- hlp != NULL;
- hlp = SH_TAILQ_NEXT(hlp, links, __db_lock))
+ SH_TAILQ_FOREACH(hlp, &obj->holders, links, __db_lock)
if (hlp->holder == sh_parent->id &&
lp->mode == hlp->mode)
break;
@@ -1433,7 +1433,7 @@ __lock_inherit_locks(lt, locker, flags)
hlp->refcount += lp->refcount;
/* Remove lock from object list and free it. */
- DB_ASSERT(lp->status == DB_LSTAT_HELD);
+ DB_ASSERT(dbenv, lp->status == DB_LSTAT_HELD);
SH_TAILQ_REMOVE(&obj->holders, lp, links, __db_lock);
(void)__lock_freelock(lt, lp, locker, DB_LOCK_FREE);
} else {
@@ -1449,15 +1449,16 @@ __lock_inherit_locks(lt, locker, flags)
* reference count, because there might be a sibling waiting,
* who will now be allowed to make forward progress.
*/
- (void)__lock_promote(lt, obj,
- LF_ISSET(DB_LOCK_NOWAITERS));
+ if ((ret = __lock_promote(
+ lt, obj, NULL, LF_ISSET(DB_LOCK_NOWAITERS))) != 0)
+ return (ret);
}
/* Transfer child counts to parent. */
sh_parent->nlocks += sh_locker->nlocks;
sh_parent->nwrites += sh_locker->nwrites;
-err: return (ret);
+ return (ret);
}
/*
@@ -1466,12 +1467,14 @@ err: return (ret);
* Look through the waiters and holders lists and decide which (if any)
* locks can be promoted. Promote any that are eligible.
*
- * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+ * PUBLIC: int __lock_promote
+ * PUBLIC: __P((DB_LOCKTAB *, DB_LOCKOBJ *, int *, u_int32_t));
*/
int
-__lock_promote(lt, obj, flags)
+__lock_promote(lt, obj, state_changedp, flags)
DB_LOCKTAB *lt;
DB_LOCKOBJ *obj;
+ int *state_changedp;
u_int32_t flags;
{
struct __db_lock *lp_w, *lp_h, *next_waiter;
@@ -1510,21 +1513,17 @@ __lock_promote(lt, obj, flags)
if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
continue;
- if (LF_ISSET(DB_LOCK_REMOVE)) {
- __lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
- continue;
- }
- for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
- lp_h != NULL;
- lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
+ SH_TAILQ_FOREACH(lp_h, &obj->holders, links, __db_lock) {
if (lp_h->holder != lp_w->holder &&
CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
LOCKER_LOCK(lt,
region, lp_w->holder, locker_ndx);
if ((__lock_getlocker(lt, lp_w->holder,
locker_ndx, 0, &sh_locker)) != 0) {
- DB_ASSERT(0);
- break;
+ __db_errx(lt->dbenv,
+ "Locker %#lx missing",
+ (u_long)lp_w->holder);
+ return (__db_panic(lt->dbenv, EINVAL));
}
if (!__lock_is_parent(lt,
lp_h->holder, sh_locker))
@@ -1540,7 +1539,7 @@ __lock_promote(lt, obj, flags)
SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
/* Wake up waiter. */
- MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
+ MUTEX_UNLOCK(lt->dbenv, lp_w->mtx_lock);
state_changed = 1;
}
@@ -1550,7 +1549,11 @@ __lock_promote(lt, obj, flags)
*/
if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
- return (state_changed);
+
+ if (state_changedp != NULL)
+ *state_changedp = state_changed;
+
+ return (0);
}
/*
@@ -1563,7 +1566,7 @@ __lock_promote(lt, obj, flags)
*
* This must be called with the Object bucket locked.
*/
-static void
+static int
__lock_remove_waiter(lt, sh_obj, lockp, status)
DB_LOCKTAB *lt;
DB_LOCKOBJ *sh_obj;
@@ -1589,7 +1592,9 @@ __lock_remove_waiter(lt, sh_obj, lockp, status)
* Wake whoever is waiting on this lock.
*/
if (do_wakeup)
- MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
+ MUTEX_UNLOCK(lt->dbenv, lockp->mtx_lock);
+
+ return (0);
}
/*
@@ -1627,7 +1632,7 @@ __lock_trade(dbenv, lock, new_locker)
return (ret);
if (sh_locker == NULL) {
- __db_err(dbenv, "Locker does not exist");
+ __db_errx(dbenv, "Locker does not exist");
return (EINVAL);
}
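
The DB_LOCK_PUT_ALL request handled by __lock_vec above is reachable through the public DB_ENV->lock_vec call. A minimal sketch, not from this commit, assuming an open environment and a locker id obtained from lock_id:

#include <string.h>
#include <db.h>

/*
 * Sketch: release every lock held by "locker" in a single lock_vec
 * call, the same DB_LOCK_PUT_ALL path __lock_vec dispatches above.
 */
static int
release_all_locks(DB_ENV *dbenv, u_int32_t locker)
{
	DB_LOCKREQ req;

	memset(&req, 0, sizeof(req));
	req.op = DB_LOCK_PUT_ALL;
	return (dbenv->lock_vec(dbenv, locker, 0, &req, 1, NULL));
}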
diff --git a/db/lock/lock_deadlock.c b/db/lock/lock_deadlock.c
index 8caf82100..4dfcfc727 100644
--- a/db/lock/lock_deadlock.c
+++ b/db/lock/lock_deadlock.c
@@ -1,22 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_deadlock.c,v 11.86 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_deadlock.c,v 12.17 2006/08/24 14:46:10 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
#include "dbinc/txn.h"
@@ -30,7 +23,7 @@
}
#define SET_MAP(M, B) ((M)[(B) / 32] |= (1 << ((B) % 32)))
-#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1 << ((B) % 32)))
+#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~((u_int)1 << ((B) % 32)))
#define OR_MAP(D, S, N) { \
u_int32_t __i; \
@@ -51,7 +44,7 @@ typedef struct {
db_pgno_t pgno;
} locker_info;
-static int __dd_abort __P((DB_ENV *, locker_info *));
+static int __dd_abort __P((DB_ENV *, locker_info *, int *));
static int __dd_build __P((DB_ENV *,
u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
static int __dd_find __P((DB_ENV *,
@@ -77,7 +70,8 @@ __lock_detect_pp(dbenv, flags, atype, abortp)
u_int32_t flags, atype;
int *abortp;
{
- int ret, rep_check;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
@@ -98,17 +92,14 @@ __lock_detect_pp(dbenv, flags, atype, abortp)
case DB_LOCK_YOUNGEST:
break;
default:
- __db_err(dbenv,
+ __db_errx(dbenv,
"DB_ENV->lock_detect: unknown deadlock detection mode specified");
return (EINVAL);
}
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_detect(dbenv, atype, abortp);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_detect(dbenv, atype, abortp)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -126,19 +117,18 @@ __lock_detect(dbenv, atype, abortp)
{
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
- DB_TXNMGR *tmgr;
db_timeval_t now;
locker_info *idmap;
u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
u_int32_t i, cid, keeper, killid, limit, nalloc, nlockers;
u_int32_t lock_max, txn_max;
- int ret;
+ int ret, status;
/*
* If this environment is a replication client, then we must use the
* MINWRITE detection discipline.
*/
- if (__rep_is_client(dbenv))
+ if (IS_REP_CLIENT(dbenv))
atype = DB_LOCK_MINWRITE;
free_me = NULL;
@@ -148,7 +138,7 @@ __lock_detect(dbenv, atype, abortp)
*abortp = 0;
/* Check if a detector run is necessary. */
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
/* Make a pass only if auto-detect would run. */
region = lt->reginfo.primary;
@@ -157,7 +147,7 @@ __lock_detect(dbenv, atype, abortp)
if (region->need_dd == 0 &&
(!LOCK_TIME_ISVALID(&region->next_timeout) ||
!__lock_expired(dbenv, &now, &region->next_timeout))) {
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
return (0);
}
if (region->need_dd == 0)
@@ -169,33 +159,19 @@ __lock_detect(dbenv, atype, abortp)
/* Build the waits-for bitmap. */
ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
lock_max = region->stat.st_cur_maxid;
- UNLOCKREGION(dbenv, lt);
-
- /*
- * We need the cur_maxid from the txn region as well. In order
- * to avoid tricky synchronization between the lock and txn
- * regions, we simply unlock the lock region and then lock the
- * txn region. This introduces a small window during which the
- * transaction system could then wrap. We're willing to return
- * the wrong answer for "oldest" or "youngest" in those rare
- * circumstances.
- */
- tmgr = dbenv->tx_handle;
- if (tmgr != NULL) {
- R_LOCK(dbenv, &tmgr->reginfo);
- txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
- R_UNLOCK(dbenv, &tmgr->reginfo);
- } else
- txn_max = TXN_MAXIMUM;
+ LOCK_SYSTEM_UNLOCK(dbenv);
if (ret != 0 || atype == DB_LOCK_EXPIRE)
return (ret);
+ /* If there are no lockers, there are no deadlocks. */
if (nlockers == 0)
return (0);
+
#ifdef DIAGNOSTIC
if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
#endif
+
/* Now duplicate the bitmaps so we can verify deadlock participants. */
if ((ret = __os_calloc(dbenv, (size_t)nlockers,
sizeof(u_int32_t) * nalloc, &copymap)) != 0)
@@ -210,6 +186,23 @@ __lock_detect(dbenv, atype, abortp)
__dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
return (ret);
+ /*
+ * We need the cur_maxid from the txn region as well. In order
+ * to avoid tricky synchronization between the lock and txn
+ * regions, we simply unlock the lock region and then lock the
+ * txn region. This introduces a small window during which the
+ * transaction system could then wrap. We're willing to return
+ * the wrong answer for "oldest" or "youngest" in those rare
+ * circumstances.
+ */
+ if (TXN_ON(dbenv)) {
+ TXN_SYSTEM_LOCK(dbenv);
+ txn_max = ((DB_TXNREGION *)
+ dbenv->tx_handle->reginfo.primary)->cur_maxid;
+ TXN_SYSTEM_UNLOCK(dbenv);
+ } else
+ txn_max = TXN_MAXIMUM;
+
killid = BAD_KILLID;
free_me = deadp;
for (; *deadp != NULL; deadp++) {
@@ -327,24 +320,24 @@ dokill: if (killid == BAD_KILLID) {
* break the deadlock, signal to run
* detection again.
*/
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
region->need_dd = 1;
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
killid = keeper;
}
}
/* Kill the locker with lockid idmap[killid]. */
- if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
- /*
- * It's possible that the lock was already aborted;
- * this isn't necessarily a problem, so do not treat
- * it as an error.
- */
- if (ret == DB_ALREADY_ABORTED)
- ret = 0;
- else
- __db_err(dbenv,
+ if ((ret = __dd_abort(dbenv, &idmap[killid], &status)) != 0)
+ break;
+
+ /*
+ * It's possible that the lock was already aborted; this isn't
+ * necessarily a problem, so do not treat it as an error.
+ */
+ if (status != 0) {
+ if (status != DB_ALREADY_ABORTED)
+ __db_errx(dbenv,
"warning: unable to abort locker %lx",
(u_long)idmap[killid].id);
} else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
@@ -367,7 +360,7 @@ err: if (free_me != NULL)
* Utilities
*/
-# define DD_INVALID_ID ((u_int32_t) -1)
+#define DD_INVALID_ID ((u_int32_t) -1)
static int
__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
@@ -384,26 +377,43 @@ __dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
db_timeval_t now, min_timeout;
u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
u_int8_t *pptr;
- int expire_only, is_first, ret;
+ int is_first, ret;
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
LOCK_SET_TIME_INVALID(&now);
LOCK_SET_TIME_MAX(&min_timeout);
- expire_only = atype == DB_LOCK_EXPIRE;
/*
- * While we always check for expired timeouts, if we are called
- * with DB_LOCK_EXPIRE, then we are only checking for timeouts
- * (i.e., not doing deadlock detection at all). If we aren't
- * doing real deadlock detection, then we can skip a significant,
- * amount of the processing. In particular we do not build
- * the conflict array and our caller needs to expect this.
+ * While we always check for expired timeouts, if we are called with
+ * DB_LOCK_EXPIRE, then we are only checking for timeouts (i.e., not
+ * doing deadlock detection at all). If we aren't doing real deadlock
+ * detection, then we can skip a significant, amount of the processing.
+ * In particular we do not build the conflict array and our caller
+ * needs to expect this.
*/
- if (expire_only) {
- count = 0;
- nentries = 0;
- goto obj_loop;
+ if (atype == DB_LOCK_EXPIRE) {
+ SH_TAILQ_FOREACH(op, &region->dd_objs, dd_links, __db_lockobj)
+ SH_TAILQ_FOREACH(lp, &op->waiters, links, __db_lock) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (lp->status == DB_LSTAT_WAITING) {
+ if (__lock_expired(dbenv,
+ &now, &lockerp->lk_expire)) {
+ lp->status = DB_LSTAT_EXPIRED;
+ MUTEX_UNLOCK(
+ dbenv, lp->mtx_lock);
+ continue;
+ }
+ if (LOCK_TIME_GREATER(
+ &min_timeout, &lockerp->lk_expire))
+ min_timeout =
+ lockerp->lk_expire;
+ }
+ }
+ goto done;
}
/*
@@ -413,7 +423,6 @@ __dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
* mutex the second time.
*/
retry: count = region->stat.st_nlockers;
-
if (count == 0) {
*nlockers = 0;
return (0);
@@ -462,9 +471,8 @@ retry: count = region->stat.st_nlockers;
/*
* First we go through and assign each locker a deadlock detector id.
*/
- for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
- lip != NULL;
- lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
+ id = 0;
+ SH_TAILQ_FOREACH(lip, &region->lockers, ulinks, __db_locker) {
if (lip->master_locker == INVALID_ROFF) {
lip->dd_id = id++;
id_array[lip->dd_id].id = lip->id;
@@ -477,6 +485,8 @@ retry: count = region->stat.st_nlockers;
case DB_LOCK_MAXWRITE:
id_array[lip->dd_id].count = lip->nwrites;
break;
+ default:
+ break;
}
if (F_ISSET(lip, DB_LOCKER_INABORT))
id_array[lip->dd_id].in_abort = 1;
@@ -492,20 +502,14 @@ retry: count = region->stat.st_nlockers;
* list and add an entry in the waitsfor matrix for each waiter/holder
* combination.
*/
-obj_loop:
- for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
- op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
- if (expire_only)
- goto look_waiters;
+ SH_TAILQ_FOREACH(op, &region->dd_objs, dd_links, __db_lockobj) {
CLEAR_MAP(tmpmap, nentries);
/*
* First we go through and create a bit map that
* represents all the holders of this object.
*/
- for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
- lp != NULL;
- lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ SH_TAILQ_FOREACH(lp, &op->holders, links, __db_lock) {
LOCKER_LOCK(lt, region, lp->holder, ndx);
if ((ret = __lock_getlocker(lt,
lp->holder, ndx, 0, &lockerp)) != 0)
@@ -524,6 +528,8 @@ obj_loop:
case DB_LOCK_MAXWRITE:
id_array[dd].count += lockerp->nwrites;
break;
+ default:
+ break;
}
if (F_ISSET(lockerp, DB_LOCKER_INABORT))
id_array[dd].in_abort = 1;
@@ -544,7 +550,6 @@ obj_loop:
* Next, for each waiter, we set its row in the matrix
* equal to the map of holders we set up above.
*/
-look_waiters:
for (is_first = 1,
lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
lp != NULL;
@@ -558,18 +563,14 @@ look_waiters:
if (__lock_expired(dbenv,
&now, &lockerp->lk_expire)) {
lp->status = DB_LSTAT_EXPIRED;
- MUTEX_UNLOCK(dbenv, &lp->mutex);
+ MUTEX_UNLOCK(dbenv, lp->mtx_lock);
continue;
}
if (LOCK_TIME_GREATER(
&min_timeout, &lockerp->lk_expire))
min_timeout = lockerp->lk_expire;
-
}
- if (expire_only)
- continue;
-
if (lockerp->dd_id == DD_INVALID_ID) {
dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
lockerp->master_locker))->dd_id;
@@ -583,6 +584,8 @@ look_waiters:
case DB_LOCK_MAXWRITE:
id_array[dd].count += lockerp->nwrites;
break;
+ default:
+ break;
}
} else
dd = lockerp->dd_id;
@@ -612,15 +615,6 @@ look_waiters:
}
}
- if (LOCK_TIME_ISVALID(&region->next_timeout)) {
- if (LOCK_TIME_ISMAX(&min_timeout))
- LOCK_SET_TIME_INVALID(&region->next_timeout);
- else
- region->next_timeout = min_timeout;
- }
- if (expire_only)
- return (0);
-
/* Now for each locker; record its last lock. */
for (id = 0; id < count; id++) {
if (!id_array[id].valid)
@@ -628,7 +622,7 @@ look_waiters:
LOCKER_LOCK(lt, region, id_array[id].id, ndx);
if ((ret = __lock_getlocker(lt,
id_array[id].id, ndx, 0, &lockerp)) != 0) {
- __db_err(dbenv,
+ __db_errx(dbenv,
"No locks for locker %lu", (u_long)id_array[id].id);
continue;
}
@@ -680,6 +674,12 @@ get_lock: id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
*bmp = bitmap;
*allocp = nentries;
__os_free(dbenv, tmpmap);
+done: if (LOCK_TIME_ISVALID(&region->next_timeout)) {
+ if (LOCK_TIME_ISMAX(&min_timeout))
+ LOCK_SET_TIME_INVALID(&region->next_timeout);
+ else
+ region->next_timeout = min_timeout;
+ }
return (0);
}
@@ -750,9 +750,10 @@ __dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
}
static int
-__dd_abort(dbenv, info)
+__dd_abort(dbenv, info, statusp)
DB_ENV *dbenv;
locker_info *info;
+ int *statusp;
{
struct __db_lock *lockp;
DB_LOCKER *lockerp;
@@ -762,37 +763,39 @@ __dd_abort(dbenv, info)
u_int32_t ndx;
int ret;
+ *statusp = 0;
+
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
+ ret = 0;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
/*
- * Get the locker. If its gone or was aborted while
- * we were detecting return that.
+ * Get the locker. If it's gone or was aborted while we were
+ * detecting, return that.
*/
LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
if ((ret = __lock_getlocker(lt,
- info->last_locker_id, ndx, 0, &lockerp)) != 0 ||
- lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
- if (ret == 0)
- ret = DB_ALREADY_ABORTED;
+ info->last_locker_id, ndx, 0, &lockerp)) != 0)
+ goto err;
+ if (lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
+ *statusp = DB_ALREADY_ABORTED;
goto out;
}
/*
- * Find the locker's last lock.
- * It is possible for this lock to have been freed,
- * either though a timeout or another detector run.
+ * Find the locker's last lock. It is possible for this lock to have
+ * been freed, either though a timeout or another detector run.
*/
if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
- ret = DB_ALREADY_ABORTED;
+ *statusp = DB_ALREADY_ABORTED;
goto out;
}
if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
lockp->holder != lockerp->id ||
lockp->obj != info->last_obj || lockp->status != DB_LSTAT_WAITING) {
- ret = DB_ALREADY_ABORTED;
+ *statusp = DB_ALREADY_ABORTED;
goto out;
}
@@ -812,15 +815,12 @@ __dd_abort(dbenv, info)
SH_TAILQ_REMOVE(&region->dd_objs,
sh_obj, dd_links, __db_lockobj);
else
- ret = __lock_promote(lt, sh_obj, 0);
- MUTEX_UNLOCK(dbenv, &lockp->mutex);
+ ret = __lock_promote(lt, sh_obj, NULL, 0);
+ MUTEX_UNLOCK(dbenv, lockp->mtx_lock);
region->stat.st_ndeadlocks++;
- UNLOCKREGION(dbenv, lt);
-
- return (0);
-
-out: UNLOCKREGION(dbenv, lt);
+err:
+out: LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
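
The public face of __lock_detect above is DB_ENV->lock_detect. A short sketch (not part of the commit) of requesting one detector pass with the DB_LOCK_MINWRITE policy that replication clients are forced to use:

#include <db.h>

/*
 * Sketch: run one deadlock-detector pass, rejecting the locker holding
 * the fewest write locks.  On return, "aborted" holds the number of
 * lock requests that were rejected.
 */
static int
run_deadlock_detector(DB_ENV *dbenv)
{
	int aborted, ret;

	if ((ret = dbenv->lock_detect(dbenv,
	    0, DB_LOCK_MINWRITE, &aborted)) != 0)
		return (ret);
	if (aborted != 0)
		dbenv->errx(dbenv,
		    "deadlock detector rejected %d request(s)", aborted);
	return (0);
}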
diff --git a/db/lock/lock_failchk.c b/db/lock/lock_failchk.c
new file mode 100644
index 000000000..55f729694
--- /dev/null
+++ b/db/lock/lock_failchk.c
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2005-2006
+ * Oracle Corporation. All rights reserved.
+ *
+ * $Id: lock_failchk.c,v 12.9 2006/08/24 14:46:11 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#include "db_int.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+/*
+ * __lock_failchk --
+ * Check for locks held by dead threads of control.
+ *
+ * PUBLIC: int __lock_failchk __P((DB_ENV *));
+ */
+int
+__lock_failchk(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKER *lip;
+ DB_LOCKREGION *lrp;
+ DB_LOCKREQ request;
+ DB_LOCKTAB *lt;
+ u_int32_t i;
+ int ret;
+ char buf[DB_THREADID_STRLEN];
+
+ lt = dbenv->lk_handle;
+ lrp = lt->reginfo.primary;
+
+retry: LOCK_SYSTEM_LOCK(dbenv);
+
+ ret = 0;
+ for (i = 0; i < lrp->locker_t_size; i++)
+ SH_TAILQ_FOREACH(lip, &lt->locker_tab[i], links, __db_locker) {
+ /*
+ * If the locker is transactional, we can ignore it;
+ * __txn_failchk aborts any transactions the locker
+ * is involved in.
+ */
+ if (lip->id >= TXN_MINIMUM)
+ continue;
+
+ /* If the locker is still alive, it's not a problem. */
+ if (dbenv->is_alive(dbenv, lip->pid, lip->tid, 0))
+ continue;
+
+ /*
+ * We can only deal with read locks. If the locker
+ * holds write locks we have to assume a Berkeley DB
+ * operation was interrupted with only 1-of-N pages
+ * modified.
+ */
+ if (lip->nwrites != 0) {
+ ret = __db_failed(dbenv,
+ "locker has write locks",
+ lip->pid, lip->tid);
+ break;
+ }
+
+ /*
+ * Discard the locker and its read locks.
+ */
+ __db_msg(dbenv, "Freeing locks for locker %#lx: %s",
+ (u_long)lip->id, dbenv->thread_id_string(
+ dbenv, lip->pid, lip->tid, buf));
+ LOCK_SYSTEM_UNLOCK(dbenv);
+ memset(&request, 0, sizeof(request));
+ request.op = DB_LOCK_PUT_ALL;
+ if ((ret = __lock_vec(
+ dbenv, lip->id, 0, &request, 1, NULL)) != 0)
+ return (ret);
+
+ /*
+ * This locker is most likely referenced by a cursor
+ * which is owned by a dead thread. Normally the
+ * cursor would be available for other threads
+ * but we assume the dead thread will never release
+ * it.
+ */
+ if ((ret = __lock_freefamilylocker(lt, lip->id)) != 0)
+ return (ret);
+ goto retry;
+ }
+
+ LOCK_SYSTEM_UNLOCK(dbenv);
+
+ return (ret);
+}
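
lock_failchk.c is new in this import and backs DB_ENV->failchk. The sketch below (not part of the commit) shows how an application might arm it; the callback prototypes mirror the dbenv->thread_id and dbenv->is_alive calls made above (note is_alive is invoked with a trailing flags argument), but they are assumptions here, so check db.h on your build. The kill(2)-based liveness check distinguishes processes only, not individual threads.

#include <sys/types.h>
#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <db.h>

/* Identify the calling thread of control (mirrors dbenv->thread_id above). */
static void
my_thread_id(DB_ENV *dbenv, pid_t *pidp, db_threadid_t *tidp)
{
	(void)dbenv;
	*pidp = getpid();
	*tidp = (db_threadid_t)pthread_self();	/* assumes a pthreads build */
}

/* Return nonzero while the thread of control is still alive. */
static int
my_is_alive(DB_ENV *dbenv, pid_t pid, db_threadid_t tid, u_int32_t flags)
{
	(void)dbenv;
	(void)tid;
	(void)flags;
	/* Process-level check only: kill(pid, 0) succeeds while pid exists. */
	return (kill(pid, 0) == 0);
}

/* Register the callbacks, then sweep for dead lockers with failchk. */
static int
arm_failchk(DB_ENV *dbenv)
{
	int ret;

	if ((ret = dbenv->set_thread_id(dbenv, my_thread_id)) != 0 ||
	    (ret = dbenv->set_isalive(dbenv, my_is_alive)) != 0)
		return (ret);
	return (dbenv->failchk(dbenv, 0));
}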
diff --git a/db/lock/lock_id.c b/db/lock/lock_id.c
index 02f85765d..0e9fb14dd 100644
--- a/db/lock/lock_id.c
+++ b/db/lock/lock_id.c
@@ -1,23 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_id.c,v 11.146 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_id.c,v 12.16 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#include <stdlib.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
@@ -32,18 +24,16 @@ __lock_id_pp(dbenv, idp)
DB_ENV *dbenv;
u_int32_t *idp;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_id(dbenv, idp);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_id(dbenv, idp, NULL)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -51,31 +41,34 @@ __lock_id_pp(dbenv, idp)
* __lock_id --
* DB_ENV->lock_id.
*
- * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *));
+ * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *, DB_LOCKER **));
*/
int
-__lock_id(dbenv, idp)
+__lock_id(dbenv, idp, lkp)
DB_ENV *dbenv;
u_int32_t *idp;
+ DB_LOCKER **lkp;
{
DB_LOCKER *lk;
DB_LOCKTAB *lt;
DB_LOCKREGION *region;
- u_int32_t *ids, locker_ndx;
+ u_int32_t id, *ids, locker_ndx;
int nids, ret;
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
ret = 0;
+ id = DB_LOCK_INVALIDID;
+ lk = NULL;
+
+ LOCK_SYSTEM_LOCK(dbenv);
+
/*
- * Allocate a new lock id. If we wrap around then we
- * find the minimum currently in use and make sure we
- * can stay below that. This code is similar to code
- * in __txn_begin_int for recovering txn ids.
- */
- LOCKREGION(dbenv, lt);
- /*
+ * Allocate a new lock id. If we wrap around then we find the minimum
+ * currently in use and make sure we can stay below that. This code is
+ * similar to code in __txn_begin_int for recovering txn ids.
+ *
* Our current valid range can span the maximum valid value, so check
* for it and wrap manually.
*/
@@ -87,9 +80,7 @@ __lock_id(dbenv, idp)
sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0)
goto err;
nids = 0;
- for (lk = SH_TAILQ_FIRST(&region->lockers, __db_locker);
- lk != NULL;
- lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker))
+ SH_TAILQ_FOREACH(lk, &region->lockers, ulinks, __db_locker)
ids[nids++] = lk->id;
region->stat.st_id = DB_LOCK_INVALIDID;
region->stat.st_cur_maxid = DB_LOCK_MAXID;
@@ -98,18 +89,37 @@ __lock_id(dbenv, idp)
&region->stat.st_id, &region->stat.st_cur_maxid);
__os_free(dbenv, ids);
}
- *idp = ++region->stat.st_id;
+ id = ++region->stat.st_id;
/* Allocate a locker for this id. */
- LOCKER_LOCK(lt, region, *idp, locker_ndx);
- ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);
+ LOCKER_LOCK(lt, region, id, locker_ndx);
+ ret = __lock_getlocker(lt, id, locker_ndx, 1, &lk);
-err: UNLOCKREGION(dbenv, lt);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
+ if (idp)
+ *idp = id;
+ if (lkp)
+ *lkp = lk;
return (ret);
}
/*
+ * __lock_set_thread_id --
+ * Set the thread_id in an existing locker.
+ * PUBLIC: void __lock_set_thread_id __P((DB_LOCKER *, pid_t, db_threadid_t));
+ */
+void
+__lock_set_thread_id(lref, pid, tid)
+ DB_LOCKER *lref;
+ pid_t pid;
+ db_threadid_t tid;
+{
+ lref->pid = pid;
+ lref->tid = tid;
+}
+
+/*
* __lock_id_free_pp --
* DB_ENV->lock_id_free pre/post processing.
*
@@ -120,18 +130,16 @@ __lock_id_free_pp(dbenv, id)
DB_ENV *dbenv;
u_int32_t id;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_id_free(dbenv, id);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_id_free(dbenv, id)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -159,26 +167,26 @@ __lock_id_free(dbenv, id)
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
LOCKER_LOCK(lt, region, id, locker_ndx);
if ((ret = __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
goto err;
if (sh_locker == NULL) {
- __db_err(dbenv, "Unknown locker ID: %lx", (u_long)id);
+ __db_errx(dbenv, "Unknown locker ID: %lx", (u_long)id);
ret = EINVAL;
goto err;
}
if (sh_locker->nlocks != 0) {
- __db_err(dbenv, "Locker still has locks");
+ __db_errx(dbenv, "Locker still has locks");
ret = EINVAL;
goto err;
}
__lock_freelocker(lt, region, sh_locker, locker_ndx);
-err: UNLOCKREGION(dbenv, lt);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -233,13 +241,13 @@ __lock_getlocker(lt, locker, indx, create, retp)
dbenv = lt->dbenv;
region = lt->reginfo.primary;
- HASHLOOKUP(lt->locker_tab,
- indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp);
-
/*
- * If we found the locker, then we can just return it. If
- * we didn't find the locker, then we need to create it.
+ * If we find the locker, then we can just return it. If we don't find
+ * the locker, then we need to create it.
*/
+ SH_TAILQ_FOREACH(sh_locker, &lt->locker_tab[indx], links, __db_locker)
+ if (sh_locker->id == locker)
+ break;
if (sh_locker == NULL && create) {
/* Create new locker and then insert it into hash table. */
if ((sh_locker = SH_TAILQ_FIRST(
@@ -251,6 +259,7 @@ __lock_getlocker(lt, locker, indx, create, retp)
region->stat.st_maxnlockers = region->stat.st_nlockers;
sh_locker->id = locker;
+ dbenv->thread_id(dbenv, &sh_locker->pid, &sh_locker->tid);
sh_locker->dd_id = 0;
sh_locker->master_locker = INVALID_ROFF;
sh_locker->parent_locker = INVALID_ROFF;
@@ -263,7 +272,8 @@ __lock_getlocker(lt, locker, indx, create, retp)
LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
- HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(
+ &lt->locker_tab[indx], sh_locker, links, __db_locker);
SH_TAILQ_INSERT_HEAD(&region->lockers,
sh_locker, ulinks, __db_locker);
}
@@ -291,12 +301,11 @@ __lock_addfamilylocker(dbenv, pid, id)
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
/* get/create the parent locker info */
LOCKER_LOCK(lt, region, pid, ndx);
- if ((ret = __lock_getlocker(dbenv->lk_handle,
- pid, ndx, 1, &mlockerp)) != 0)
+ if ((ret = __lock_getlocker(lt, pid, ndx, 1, &mlockerp)) != 0)
goto err;
/*
@@ -307,8 +316,7 @@ __lock_addfamilylocker(dbenv, pid, id)
* family be created at the same time.
*/
LOCKER_LOCK(lt, region, id, ndx);
- if ((ret = __lock_getlocker(dbenv->lk_handle,
- id, ndx, 1, &lockerp)) != 0)
+ if ((ret = __lock_getlocker(lt, id, ndx, 1, &lockerp)) != 0)
goto err;
/* Point to our parent. */
@@ -330,8 +338,7 @@ __lock_addfamilylocker(dbenv, pid, id)
SH_LIST_INSERT_HEAD(
&mlockerp->child_locker, lockerp, child_link, __db_locker);
-err:
- UNLOCKREGION(dbenv, lt);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -358,7 +365,7 @@ __lock_freefamilylocker(lt, locker)
dbenv = lt->dbenv;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
LOCKER_LOCK(lt, region, locker, indx);
if ((ret = __lock_getlocker(lt,
@@ -367,7 +374,7 @@ __lock_freefamilylocker(lt, locker)
if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
ret = EINVAL;
- __db_err(dbenv, "Freeing locker with locks");
+ __db_errx(dbenv, "Freeing locker with locks");
goto err;
}
@@ -377,8 +384,7 @@ __lock_freefamilylocker(lt, locker)
__lock_freelocker(lt, region, sh_locker, indx);
-err:
- UNLOCKREGION(dbenv, lt);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -398,8 +404,7 @@ __lock_freelocker(lt, region, sh_locker, indx)
u_int32_t indx;
{
- HASHREMOVE_EL(
- lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_REMOVE(&lt->locker_tab[indx], sh_locker, links, __db_locker);
SH_TAILQ_INSERT_HEAD(
&region->free_lockers, sh_locker, links, __db_locker);
SH_TAILQ_REMOVE(&region->lockers, sh_locker, ulinks, __db_locker);
diff --git a/db/lock/lock_list.c b/db/lock/lock_list.c
index 5851dc7fa..ab1db32e0 100644
--- a/db/lock/lock_list.c
+++ b/db/lock/lock_list.c
@@ -1,23 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_list.c,v 11.146 2004/09/22 03:48:29 bostic Exp $
+ * $Id: lock_list.c,v 12.9 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#include <stdlib.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
@@ -240,16 +232,30 @@ __lock_get_list(dbenv, locker, flags, lock_mode, list)
u_int16_t npgno, size;
u_int32_t i, nlocks;
int ret;
- void *dp;
+ void *data, *dp;
if (list->size == 0)
return (0);
ret = 0;
+ data = NULL;
+
lt = dbenv->lk_handle;
dp = list->data;
+ /*
+ * There is no assurance log records will be aligned. If not, then
+ * copy the data to an aligned region so the rest of the code does
+ * not have to worry about it.
+ */
+ if ((uintptr_t)dp != DB_ALIGN((uintptr_t)dp, sizeof(u_int32_t))) {
+ if ((ret = __os_malloc(dbenv, list->size, &data)) != 0)
+ return (ret);
+ memcpy(data, list->data, list->size);
+ dp = data;
+ }
+
GET_COUNT(dp, nlocks);
- LOCKREGION(dbenv, dbenv->lk_handle);
+ LOCK_SYSTEM_LOCK(dbenv);
for (i = 0; i < nlocks; i++) {
GET_PCOUNT(dp, npgno);
@@ -271,8 +277,9 @@ __lock_get_list(dbenv, locker, flags, lock_mode, list)
lock->pgno = save_pgno;
}
-err:
- UNLOCKREGION(dbenv, dbenv->lk_handle);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
+ if (data != NULL)
+ __os_free(dbenv, data);
return (ret);
}
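
The new guard in __lock_get_list above copies the log-record buffer to heap memory when it is not u_int32_t-aligned before walking it. The same defensive pattern in isolation, as a standalone sketch (DB_ALIGN is a Berkeley DB internal macro; plain modular arithmetic stands in for it here):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch: return a pointer to "size" bytes that is safe to read through
 * a uint32_t pointer -- either the original buffer (already aligned) or
 * a malloc'd copy; the copy, if any, is returned through *to_freep and
 * must be freed by the caller.
 */
static void *
aligned_view(void *data, size_t size, void **to_freep)
{
	void *copy;

	*to_freep = NULL;
	if (((uintptr_t)data % sizeof(uint32_t)) == 0)
		return (data);
	if ((copy = malloc(size)) == NULL)
		return (NULL);
	memcpy(copy, data, size);
	*to_freep = copy;
	return (copy);
}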
diff --git a/db/lock/lock_method.c b/db/lock/lock_method.c
index d57179493..0ca08dc01 100644
--- a/db/lock/lock_method.c
+++ b/db/lock/lock_method.c
@@ -1,51 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_method.c,v 11.44 2004/06/01 21:50:05 bostic Exp $
+ * $Id: lock_method.c,v 12.13 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#ifdef HAVE_RPC
-#include <rpc/rpc.h>
-#endif
-
-#include <string.h>
-#endif
-
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
-#ifdef HAVE_RPC
-#include "dbinc_auto/rpc_client_ext.h"
-#endif
-
-static int __lock_get_lk_conflicts __P((DB_ENV *, const u_int8_t **, int *));
-static int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
-static int __lock_get_lk_detect __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_lockers __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_locks __P((DB_ENV *, u_int32_t *));
-static int __lock_get_lk_max_objects __P((DB_ENV *, u_int32_t *));
-static int __lock_get_env_timeout __P((DB_ENV *, db_timeout_t *, u_int32_t));
-
/*
* __lock_dbenv_create --
* Lock specific creation of the DB_ENV structure.
*
- * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ * PUBLIC: int __lock_dbenv_create __P((DB_ENV *));
*/
-void
+int
__lock_dbenv_create(dbenv)
DB_ENV *dbenv;
{
@@ -55,100 +28,56 @@ __lock_dbenv_create(dbenv)
* state or turn off mutex locking, and so we can neither check
* the panic state or acquire a mutex in the DB_ENV create path.
*/
-
dbenv->lk_max = DB_LOCK_DEFAULT_N;
dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
- dbenv->get_lk_conflicts = __dbcl_get_lk_conflicts;
- dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
- dbenv->get_lk_detect = __dbcl_get_lk_detect;
- dbenv->set_lk_detect = __dbcl_set_lk_detect;
- dbenv->set_lk_max = __dbcl_set_lk_max;
- dbenv->get_lk_max_lockers = __dbcl_get_lk_max_lockers;
- dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
- dbenv->get_lk_max_locks = __dbcl_get_lk_max_locks;
- dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
- dbenv->get_lk_max_objects = __dbcl_get_lk_max_objects;
- dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
-
- dbenv->lock_detect = __dbcl_lock_detect;
- dbenv->lock_get = __dbcl_lock_get;
- dbenv->lock_id = __dbcl_lock_id;
- dbenv->lock_id_free = __dbcl_lock_id_free;
- dbenv->lock_put = __dbcl_lock_put;
- dbenv->lock_stat = __dbcl_lock_stat;
- dbenv->lock_stat_print = NULL;
- dbenv->lock_vec = __dbcl_lock_vec;
- } else
-#endif
- {
- dbenv->get_lk_conflicts = __lock_get_lk_conflicts;
- dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
- dbenv->get_lk_detect = __lock_get_lk_detect;
- dbenv->set_lk_detect = __lock_set_lk_detect;
- dbenv->set_lk_max = __lock_set_lk_max;
- dbenv->get_lk_max_lockers = __lock_get_lk_max_lockers;
- dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
- dbenv->get_lk_max_locks = __lock_get_lk_max_locks;
- dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
- dbenv->get_lk_max_objects = __lock_get_lk_max_objects;
- dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
- dbenv->get_timeout = __lock_get_env_timeout;
- dbenv->set_timeout = __lock_set_env_timeout;
-
- dbenv->lock_detect = __lock_detect_pp;
- dbenv->lock_get = __lock_get_pp;
- dbenv->lock_id = __lock_id_pp;
- dbenv->lock_id_free = __lock_id_free_pp;
- dbenv->lock_put = __lock_put_pp;
- dbenv->lock_stat = __lock_stat_pp;
- dbenv->lock_stat_print = __lock_stat_print_pp;
- dbenv->lock_vec = __lock_vec_pp;
- }
+ return (0);
}
/*
- * __lock_dbenv_close --
+ * __lock_dbenv_destroy --
* Lock specific destruction of the DB_ENV structure.
*
- * PUBLIC: int __lock_dbenv_close __P((DB_ENV *));
+ * PUBLIC: void __lock_dbenv_destroy __P((DB_ENV *));
*/
-int
-__lock_dbenv_close(dbenv)
+void
+__lock_dbenv_destroy(dbenv)
DB_ENV *dbenv;
{
if (dbenv->lk_conflicts != NULL) {
__os_free(dbenv, dbenv->lk_conflicts);
dbenv->lk_conflicts = NULL;
}
-
- return (0);
}
/*
* __lock_get_lk_conflicts
* Get the conflicts matrix.
+ *
+ * PUBLIC: int __lock_get_lk_conflicts
+ * PUBLIC: __P((DB_ENV *, const u_int8_t **, int *));
*/
-static int
+int
__lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_modesp)
DB_ENV *dbenv;
const u_int8_t **lk_conflictsp;
int *lk_modesp;
{
+ DB_LOCKTAB *lt;
+
ENV_NOT_CONFIGURED(dbenv,
dbenv->lk_handle, "DB_ENV->get_lk_conflicts", DB_INIT_LOCK);
+ lt = dbenv->lk_handle;
+
if (LOCKING_ON(dbenv)) {
/* Cannot be set after open, no lock required to read. */
if (lk_conflictsp != NULL)
- *lk_conflictsp =
- ((DB_LOCKTAB *)dbenv->lk_handle)->conflicts;
+ *lk_conflictsp = lt->conflicts;
if (lk_modesp != NULL)
- *lk_modesp = ((DB_LOCKREGION *)((DB_LOCKTAB *)
- dbenv->lk_handle)->reginfo.primary)->stat.st_nmodes;
+ *lk_modesp = ((DB_LOCKREGION *)
+ (lt->reginfo.primary))->stat.st_nmodes;
} else {
if (lk_conflictsp != NULL)
*lk_conflictsp = dbenv->lk_conflicts;
@@ -161,8 +90,10 @@ __lock_get_lk_conflicts(dbenv, lk_conflictsp, lk_modesp)
/*
* __lock_set_lk_conflicts
* Set the conflicts matrix.
+ *
+ * PUBLIC: int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
*/
-static int
+int
__lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
DB_ENV *dbenv;
u_int8_t *lk_conflicts;
@@ -186,7 +117,10 @@ __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
return (0);
}
-static int
+/*
+ * PUBLIC: int __lock_get_lk_detect __P((DB_ENV *, u_int32_t *));
+ */
+int
__lock_get_lk_detect(dbenv, lk_detectp)
DB_ENV *dbenv;
u_int32_t *lk_detectp;
@@ -198,10 +132,9 @@ __lock_get_lk_detect(dbenv, lk_detectp)
if (LOCKING_ON(dbenv)) {
lt = dbenv->lk_handle;
- LOCKREGION(dbenv, lt);
- *lk_detectp = ((DB_LOCKREGION *)
- ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary)->detect;
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
+ *lk_detectp = ((DB_LOCKREGION *)lt->reginfo.primary)->detect;
+ LOCK_SYSTEM_UNLOCK(dbenv);
} else
*lk_detectp = dbenv->lk_detect;
return (0);
@@ -237,7 +170,7 @@ __lock_set_lk_detect(dbenv, lk_detect)
case DB_LOCK_YOUNGEST:
break;
default:
- __db_err(dbenv,
+ __db_errx(dbenv,
"DB_ENV->set_lk_detect: unknown deadlock detection mode specified");
return (EINVAL);
}
@@ -246,7 +179,7 @@ __lock_set_lk_detect(dbenv, lk_detect)
if (LOCKING_ON(dbenv)) {
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
/*
* Check for incompatible automatic deadlock detection requests.
* There are scenarios where changing the detector configuration
@@ -259,13 +192,13 @@ __lock_set_lk_detect(dbenv, lk_detect)
if (region->detect != DB_LOCK_NORUN &&
lk_detect != DB_LOCK_DEFAULT &&
region->detect != lk_detect) {
- __db_err(dbenv,
+ __db_errx(dbenv,
"DB_ENV->set_lk_detect: incompatible deadlock detector mode");
ret = EINVAL;
} else
if (region->detect == DB_LOCK_NORUN)
region->detect = lk_detect;
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
} else
dbenv->lk_detect = lk_detect;
@@ -273,25 +206,9 @@ __lock_set_lk_detect(dbenv, lk_detect)
}
/*
- * __lock_set_lk_max
- * DB_ENV->set_lk_max.
- *
- * PUBLIC: int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+ * PUBLIC: int __lock_get_lk_max_locks __P((DB_ENV *, u_int32_t *));
*/
int
-__lock_set_lk_max(dbenv, lk_max)
- DB_ENV *dbenv;
- u_int32_t lk_max;
-{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_lk_max");
-
- dbenv->lk_max = lk_max;
- dbenv->lk_max_objects = lk_max;
- dbenv->lk_max_lockers = lk_max;
- return (0);
-}
-
-static int
__lock_get_lk_max_locks(dbenv, lk_maxp)
DB_ENV *dbenv;
u_int32_t *lk_maxp;
@@ -301,8 +218,8 @@ __lock_get_lk_max_locks(dbenv, lk_maxp)
if (LOCKING_ON(dbenv)) {
/* Cannot be set after open, no lock required to read. */
- *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *)
- dbenv->lk_handle)->reginfo.primary)->stat.st_maxlocks;
+ *lk_maxp = ((DB_LOCKREGION *)
+ dbenv->lk_handle->reginfo.primary)->stat.st_maxlocks;
} else
*lk_maxp = dbenv->lk_max;
return (0);
@@ -325,7 +242,10 @@ __lock_set_lk_max_locks(dbenv, lk_max)
return (0);
}
-static int
+/*
+ * PUBLIC: int __lock_get_lk_max_lockers __P((DB_ENV *, u_int32_t *));
+ */
+int
__lock_get_lk_max_lockers(dbenv, lk_maxp)
DB_ENV *dbenv;
u_int32_t *lk_maxp;
@@ -335,8 +255,8 @@ __lock_get_lk_max_lockers(dbenv, lk_maxp)
if (LOCKING_ON(dbenv)) {
/* Cannot be set after open, no lock required to read. */
- *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *)
- dbenv->lk_handle)->reginfo.primary)->stat.st_maxlockers;
+ *lk_maxp = ((DB_LOCKREGION *)
+ dbenv->lk_handle->reginfo.primary)->stat.st_maxlockers;
} else
*lk_maxp = dbenv->lk_max_lockers;
return (0);
@@ -359,7 +279,10 @@ __lock_set_lk_max_lockers(dbenv, lk_max)
return (0);
}
-static int
+/*
+ * PUBLIC: int __lock_get_lk_max_objects __P((DB_ENV *, u_int32_t *));
+ */
+int
__lock_get_lk_max_objects(dbenv, lk_maxp)
DB_ENV *dbenv;
u_int32_t *lk_maxp;
@@ -369,8 +292,8 @@ __lock_get_lk_max_objects(dbenv, lk_maxp)
if (LOCKING_ON(dbenv)) {
/* Cannot be set after open, no lock required to read. */
- *lk_maxp = ((DB_LOCKREGION *)((DB_LOCKTAB *)
- dbenv->lk_handle)->reginfo.primary)->stat.st_maxobjects;
+ *lk_maxp = ((DB_LOCKREGION *)
+ dbenv->lk_handle->reginfo.primary)->stat.st_maxobjects;
} else
*lk_maxp = dbenv->lk_max_objects;
return (0);
@@ -393,7 +316,11 @@ __lock_set_lk_max_objects(dbenv, lk_max)
return (0);
}
-static int
+/*
+ * PUBLIC: int __lock_get_env_timeout
+ * PUBLIC: __P((DB_ENV *, db_timeout_t *, u_int32_t));
+ */
+int
__lock_get_env_timeout(dbenv, timeoutp, flag)
DB_ENV *dbenv;
db_timeout_t *timeoutp;
@@ -410,7 +337,7 @@ __lock_get_env_timeout(dbenv, timeoutp, flag)
if (LOCKING_ON(dbenv)) {
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
switch (flag) {
case DB_SET_LOCK_TIMEOUT:
*timeoutp = region->lk_timeout;
@@ -422,7 +349,7 @@ __lock_get_env_timeout(dbenv, timeoutp, flag)
ret = 1;
break;
}
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
} else
switch (flag) {
case DB_SET_LOCK_TIMEOUT:
@@ -465,7 +392,7 @@ __lock_set_env_timeout(dbenv, timeout, flags)
if (LOCKING_ON(dbenv)) {
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
switch (flags) {
case DB_SET_LOCK_TIMEOUT:
region->lk_timeout = timeout;
@@ -477,7 +404,7 @@ __lock_set_env_timeout(dbenv, timeout, flags)
ret = 1;
break;
}
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
} else
switch (flags) {
case DB_SET_LOCK_TIMEOUT:
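[Editor's note] Most of the lock_method.c accessors above share one shape: read the value from the shared lock region when the subsystem is open, bracketing the read with LOCK_SYSTEM_LOCK/LOCK_SYSTEM_UNLOCK if it can change after open, and otherwise fall back to the pre-open default stored in the DB_ENV. A schematic version with invented names and a pthread mutex standing in for the region mutex (assumed to be initialized elsewhere):

#include <pthread.h>

/* Hypothetical handle: region is non-NULL once the lock subsystem is open. */
struct env {
	pthread_mutex_t region_mtx;
	struct { unsigned detect; } *region;	/* shared state, mutable after open */
	unsigned default_detect;		/* pre-open default kept in the handle */
};

static int
get_detect(struct env *env, unsigned *valp)
{
	if (env->region != NULL) {			/* LOCKING_ON() analog */
		pthread_mutex_lock(&env->region_mtx);	/* LOCK_SYSTEM_LOCK analog */
		*valp = env->region->detect;
		pthread_mutex_unlock(&env->region_mtx);	/* LOCK_SYSTEM_UNLOCK analog */
	} else
		*valp = env->default_detect;
	return (0);
}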
diff --git a/db/lock/lock_region.c b/db/lock/lock_region.c
index b03dc74f1..c3a1b401d 100644
--- a/db/lock/lock_region.c
+++ b/db/lock/lock_region.c
@@ -1,32 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_region.c,v 11.82 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_region.c,v 12.11 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
static int __lock_region_init __P((DB_ENV *, DB_LOCKTAB *));
static size_t
__lock_region_size __P((DB_ENV *));
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-static size_t __lock_region_maint __P((DB_ENV *));
-#endif
-
/*
* The conflict arrays are set up such that the row is the lock you are
* holding and the column is the lock that is desired.
@@ -73,7 +62,9 @@ __lock_open(dbenv)
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
size_t size;
- int ret;
+ int region_locked, ret;
+
+ region_locked = 0;
/* Create the lock table structure. */
if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOCKTAB), &lt)) != 0)
@@ -100,6 +91,16 @@ __lock_open(dbenv)
region = lt->reginfo.primary =
R_ADDR(&lt->reginfo, lt->reginfo.rp->primary);
+ /* Set remaining pointers into region. */
+ lt->conflicts = R_ADDR(&lt->reginfo, region->conf_off);
+ lt->obj_tab = R_ADDR(&lt->reginfo, region->obj_off);
+ lt->locker_tab = R_ADDR(&lt->reginfo, region->locker_off);
+
+ dbenv->lk_handle = lt;
+
+ LOCK_SYSTEM_LOCK(dbenv);
+ region_locked = 1;
+
if (dbenv->lk_detect != DB_LOCK_NORUN) {
/*
* Check for incompatible automatic deadlock detection requests.
@@ -113,7 +114,7 @@ __lock_open(dbenv)
if (region->detect != DB_LOCK_NORUN &&
dbenv->lk_detect != DB_LOCK_DEFAULT &&
region->detect != dbenv->lk_detect) {
- __db_err(dbenv,
+ __db_errx(dbenv,
"lock_open: incompatible deadlock detector mode");
ret = EINVAL;
goto err;
@@ -131,22 +132,18 @@ __lock_open(dbenv)
if (dbenv->tx_timeout != 0)
region->tx_timeout = dbenv->tx_timeout;
- /* Set remaining pointers into region. */
- lt->conflicts = R_ADDR(&lt->reginfo, region->conf_off);
- lt->obj_tab = R_ADDR(&lt->reginfo, region->obj_off);
- lt->locker_tab = R_ADDR(&lt->reginfo, region->locker_off);
+ LOCK_SYSTEM_UNLOCK(dbenv);
+ region_locked = 0;
- R_UNLOCK(dbenv, &lt->reginfo);
-
- dbenv->lk_handle = lt;
return (0);
-err: if (lt->reginfo.addr != NULL) {
- if (F_ISSET(&lt->reginfo, REGION_CREATE))
- ret = __db_panic(dbenv, ret);
- R_UNLOCK(dbenv, &lt->reginfo);
+err: dbenv->lk_handle = NULL;
+ if (lt->reginfo.addr != NULL) {
+ if (region_locked)
+ LOCK_SYSTEM_UNLOCK(dbenv);
(void)__db_r_detach(dbenv, &lt->reginfo, 0);
}
+
__os_free(dbenv, lt);
return (ret);
}
@@ -165,9 +162,6 @@ __lock_region_init(dbenv, lt)
DB_LOCKER *lidp;
DB_LOCKOBJ *op;
DB_LOCKREGION *region;
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- size_t maint_size;
-#endif
u_int32_t i;
u_int8_t *addr;
int lk_modes, ret;
@@ -179,6 +173,10 @@ __lock_region_init(dbenv, lt)
region = lt->reginfo.primary;
memset(region, 0, sizeof(*region));
+ if ((ret = __mutex_alloc(
+ dbenv, MTX_LOCK_REGION, 0, &region->mtx_region)) != 0)
+ return (ret);
+
/* Select a conflict matrix if none specified. */
if (dbenv->lk_modes == 0)
if (CDB_LOCKING(dbenv)) {
@@ -229,33 +227,15 @@ __lock_region_init(dbenv, lt)
__db_hashinit(addr, region->locker_t_size);
region->locker_off = R_OFFSET(&lt->reginfo, addr);
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- maint_size = __lock_region_maint(dbenv);
- /* Allocate room for the locker maintenance info and initialize it. */
- if ((ret = __db_shalloc(&lt->reginfo,
- sizeof(REGMAINT) + maint_size, 0, &addr)) != 0)
- goto mem_err;
- __db_maintinit(&lt->reginfo, addr, maint_size);
- region->maint_off = R_OFFSET(&lt->reginfo, addr);
-#endif
-
- /*
- * Initialize locks onto a free list. Initialize and lock the mutex
- * so that when we need to block, all we need do is try to acquire
- * the mutex.
- */
+ /* Initialize locks onto a free list. */
SH_TAILQ_INIT(&region->free_locks);
for (i = 0; i < region->stat.st_maxlocks; ++i) {
if ((ret = __db_shalloc(&lt->reginfo,
- sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
+ sizeof(struct __db_lock), 0, &lp)) != 0)
goto mem_err;
- lp->status = DB_LSTAT_FREE;
+ lp->mtx_lock = MUTEX_INVALID;
lp->gen = 0;
- if ((ret = __db_mutex_setup(dbenv, &lt->reginfo, &lp->mutex,
- MUTEX_LOGICAL_LOCK | MUTEX_NO_RLOCK | MUTEX_SELF_BLOCK))
- != 0)
- return (ret);
- MUTEX_LOCK(dbenv, &lp->mutex);
+ lp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(&region->free_locks, lp, links, __db_lock);
}
@@ -276,7 +256,7 @@ __lock_region_init(dbenv, lt)
for (i = 0; i < region->stat.st_maxlockers; ++i) {
if ((ret = __db_shalloc(&lt->reginfo,
sizeof(DB_LOCKER), 0, &lidp)) != 0) {
-mem_err: __db_err(dbenv,
+mem_err: __db_errx(dbenv,
"Unable to allocate memory for the lock table");
return (ret);
}
@@ -316,14 +296,13 @@ __lock_dbenv_refresh(dbenv)
*/
if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
/* Discard the conflict matrix. */
- __db_shalloc_free(reginfo, R_ADDR(&lt->reginfo, lr->conf_off));
+ __db_shalloc_free(reginfo, R_ADDR(reginfo, lr->conf_off));
/* Discard the object hash table. */
- __db_shalloc_free(reginfo, R_ADDR(&lt->reginfo, lr->obj_off));
+ __db_shalloc_free(reginfo, R_ADDR(reginfo, lr->obj_off));
/* Discard the locker hash table. */
- __db_shalloc_free(
- reginfo, R_ADDR(&lt->reginfo, lr->locker_off));
+ __db_shalloc_free(reginfo, R_ADDR(reginfo, lr->locker_off));
/* Discard locks. */
while ((lp =
@@ -360,6 +339,19 @@ __lock_dbenv_refresh(dbenv)
}
/*
+ * __lock_region_mutex_count --
+ * Return the number of mutexes the lock region will need.
+ *
+ * PUBLIC: u_int32_t __lock_region_mutex_count __P((DB_ENV *));
+ */
+u_int32_t
+__lock_region_mutex_count(dbenv)
+ DB_ENV *dbenv;
+{
+ return (dbenv->lk_max);
+}
+
+/*
* __lock_region_size --
* Return the region size.
*/
@@ -377,20 +369,16 @@ __lock_region_size(dbenv)
retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 0);
retval += __db_shalloc_size(
(size_t)(dbenv->lk_modes * dbenv->lk_modes), 0);
- retval += __db_shalloc_size(__db_tablesize
- (dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 0);
- retval += __db_shalloc_size(__db_tablesize
- (dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 0);
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 0);
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 0);
retval +=
- __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 0);
-#endif
- retval += __db_shalloc_size
- (sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+ __db_shalloc_size(sizeof(struct __db_lock), 0) * dbenv->lk_max;
retval +=
- __db_shalloc_size(sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
+ __db_shalloc_size(sizeof(DB_LOCKOBJ), 0) * dbenv->lk_max_objects;
retval +=
- __db_shalloc_size(sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
+ __db_shalloc_size(sizeof(DB_LOCKER), 0) * dbenv->lk_max_lockers;
/*
* Include 16 bytes of string space per lock. DB doesn't use it
@@ -403,51 +391,3 @@ __lock_region_size(dbenv)
return (retval);
}
-
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
-/*
- * __lock_region_maint --
- * Return the amount of space needed for region maintenance info.
- */
-static size_t
-__lock_region_maint(dbenv)
- DB_ENV *dbenv;
-{
- size_t s;
-
- s = sizeof(DB_MUTEX *) * dbenv->lk_max;
- return (s);
-}
-#endif
-
-/*
- * __lock_region_destroy
- * Destroy any region maintenance info.
- *
- * PUBLIC: void __lock_region_destroy __P((DB_ENV *, REGINFO *));
- */
-void
-__lock_region_destroy(dbenv, infop)
- DB_ENV *dbenv;
- REGINFO *infop;
-{
- /*
- * This routine is called in two cases: when discarding the mutexes
- * from a previous Berkeley DB run, during recovery, and two, when
- * discarding the mutexes as we shut down the database environment.
- * In the latter case, we also need to discard shared memory segments,
- * this is the last time we use them, and the last region-specific
- * call we make.
- */
-#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- DB_LOCKREGION *lt;
-
- lt = R_ADDR(infop, infop->rp->primary);
-
- __db_shlocks_destroy(infop, R_ADDR(infop, lt->maint_off));
- if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
- __db_shalloc_free(infop, R_ADDR(infop, lt->maint_off));
-#endif
- if (infop->primary != NULL && F_ISSET(dbenv, DB_ENV_PRIVATE))
- __db_shalloc_free(infop, infop->primary);
-}
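[Editor's note] lock_region.c keeps the comment that the conflict matrix is indexed with the held lock mode as the row and the requested mode as the column. A toy example of that layout and lookup (the three modes and the matrix contents here are made up for illustration, not BDB's standard matrix):

#include <stdint.h>

/* Tiny 3-mode example: N = no lock, R = read, W = write. A nonzero
 * entry means the held (row) and requested (column) modes conflict. */
enum { M_N, M_R, M_W, N_MODES };

static const uint8_t conflicts[N_MODES * N_MODES] = {
	/*        N  R  W */
	/* N */   0, 0, 0,
	/* R */   0, 0, 1,
	/* W */   0, 1, 1,
};

/* Row is the mode already held, column is the mode being requested. */
static int
modes_conflict(int held, int requested)
{
	return (conflicts[held * N_MODES + requested]);
}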
diff --git a/db/lock/lock_stat.c b/db/lock/lock_stat.c
index 7cf56bb90..90bb57bb7 100644
--- a/db/lock/lock_stat.c
+++ b/db/lock/lock_stat.c
@@ -1,33 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_stat.c,v 11.64 2004/10/15 16:59:42 bostic Exp $
+ * $Id: lock_stat.c,v 12.17 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-#include <string.h>
-#if TIME_WITH_SYS_TIME
-#include <sys/time.h>
-#include <time.h>
-#else
-#if HAVE_SYS_TIME_H
-#include <sys/time.h>
-#else
-#include <time.h>
-#endif
-#endif
-
-#include <ctype.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/db_page.h"
#include "dbinc/lock.h"
#include "dbinc/log.h"
@@ -54,7 +36,8 @@ __lock_stat_pp(dbenv, statp, flags)
DB_LOCK_STAT **statp;
u_int32_t flags;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
@@ -64,12 +47,9 @@ __lock_stat_pp(dbenv, statp, flags)
"DB_ENV->lock_stat", flags, DB_STAT_CLEAR)) != 0)
return (ret);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_stat(dbenv, statp, flags);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_stat(dbenv, statp, flags)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -95,20 +75,20 @@ __lock_stat(dbenv, statp, flags)
return (ret);
/* Copy out the global statistics. */
- R_LOCK(dbenv, &lt->reginfo);
+ LOCK_SYSTEM_LOCK(dbenv);
region = lt->reginfo.primary;
memcpy(stats, &region->stat, sizeof(*stats));
stats->st_locktimeout = region->lk_timeout;
stats->st_txntimeout = region->tx_timeout;
- stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
- stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
+ __mutex_set_wait_info(dbenv, region->mtx_region,
+ &stats->st_region_wait, &stats->st_region_nowait);
stats->st_regsize = lt->reginfo.rp->size;
if (LF_ISSET(DB_STAT_CLEAR)) {
tmp = region->stat;
memset(&region->stat, 0, sizeof(region->stat));
- MUTEX_CLEAR(&lt->reginfo.rp->mutex);
+ __mutex_clear(dbenv, region->mtx_region);
region->stat.st_id = tmp.st_id;
region->stat.st_cur_maxid = tmp.st_cur_maxid;
@@ -124,7 +104,7 @@ __lock_stat(dbenv, statp, flags)
region->stat.st_nmodes = tmp.st_nmodes;
}
- R_UNLOCK(dbenv, &lt->reginfo);
+ LOCK_SYSTEM_UNLOCK(dbenv);
*statp = stats;
return (0);
@@ -141,7 +121,8 @@ __lock_stat_print_pp(dbenv, flags)
DB_ENV *dbenv;
u_int32_t flags;
{
- int rep_check, ret;
+ DB_THREAD_INFO *ip;
+ int ret;
PANIC_CHECK(dbenv);
ENV_REQUIRES_CONFIG(dbenv,
@@ -154,12 +135,9 @@ __lock_stat_print_pp(dbenv, flags)
flags, DB_STAT_CLEAR | DB_STAT_LOCK_FLAGS)) != 0)
return (ret);
- rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
- if (rep_check)
- __env_rep_enter(dbenv);
- ret = __lock_stat_print(dbenv, flags);
- if (rep_check)
- __env_db_rep_exit(dbenv);
+ ENV_ENTER(dbenv, ip);
+ REPLICATION_WRAP(dbenv, (__lock_stat_print(dbenv, flags)), ret);
+ ENV_LEAVE(dbenv, ip);
return (ret);
}
@@ -235,11 +213,15 @@ __lock_print_stats(dbenv, flags)
__db_dl(dbenv,
"Total number of locks released", (u_long)sp->st_nreleases);
__db_dl(dbenv,
- "Total number of lock requests failing because DB_LOCK_NOWAIT was set",
- (u_long)sp->st_nnowaits);
+ "Total number of locks upgraded", (u_long)sp->st_nupgrade);
+ __db_dl(dbenv,
+ "Total number of locks downgraded", (u_long)sp->st_ndowngrade);
__db_dl(dbenv,
- "Total number of locks not immediately available due to conflicts",
- (u_long)sp->st_nconflicts);
+ "Lock requests not available due to conflicts, for which we waited",
+ (u_long)sp->st_lock_wait);
+ __db_dl(dbenv,
+ "Lock requests not available due to conflicts, for which we did not wait",
+ (u_long)sp->st_lock_nowait);
__db_dl(dbenv, "Number of deadlocks", (u_long)sp->st_ndeadlocks);
__db_dl(dbenv, "Lock timeout value", (u_long)sp->st_locktimeout);
__db_dl(dbenv, "Number of locks that have timed out",
@@ -277,31 +259,38 @@ __lock_print_all(dbenv, flags)
DB_MSGBUF mb;
int i, j;
u_int32_t k;
- char buf[64];
lt = dbenv->lk_handle;
lrp = lt->reginfo.primary;
DB_MSGBUF_INIT(&mb);
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
__db_print_reginfo(dbenv, &lt->reginfo, "Lock");
if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_PARAMS)) {
__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
__db_msg(dbenv, "Lock region parameters:");
+ __mutex_print_debug_single(dbenv,
+ "Lock region region mutex", lrp->mtx_region, flags);
STAT_ULONG("locker table size", lrp->locker_t_size);
STAT_ULONG("object table size", lrp->object_t_size);
STAT_ULONG("obj_off", lrp->obj_off);
- STAT_ULONG("osynch_off", lrp->osynch_off);
STAT_ULONG("locker_off", lrp->locker_off);
- STAT_ULONG("lsynch_off", lrp->lsynch_off);
STAT_ULONG("need_dd", lrp->need_dd);
- if (LOCK_TIME_ISVALID(&lrp->next_timeout) &&
- strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S",
- localtime((time_t*)&lrp->next_timeout.tv_sec)) != 0)
- __db_msg(dbenv, "next_timeout: %s.%lu",
- buf, (u_long)lrp->next_timeout.tv_usec);
+ if (LOCK_TIME_ISVALID(&lrp->next_timeout)) {
+#ifdef HAVE_STRFTIME
+ time_t t = (time_t)lrp->next_timeout.tv_sec;
+ char tbuf[64];
+ if (strftime(tbuf, sizeof(tbuf),
+ "%m-%d-%H:%M:%S", localtime(&t)) != 0)
+ __db_msg(dbenv, "next_timeout: %s.%lu",
+ tbuf, (u_long)lrp->next_timeout.tv_usec);
+ else
+#endif
+ __db_msg(dbenv, "next_timeout: %lu",
+ (u_long)lrp->next_timeout.tv_usec);
+ }
}
if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_CONF)) {
@@ -320,28 +309,23 @@ __lock_print_all(dbenv, flags)
__db_msg(dbenv, "Locks grouped by lockers:");
__lock_print_header(dbenv);
for (k = 0; k < lrp->locker_t_size; k++)
- for (lip =
- SH_TAILQ_FIRST(&lt->locker_tab[k], __db_locker);
- lip != NULL;
- lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
+ SH_TAILQ_FOREACH(
+ lip, &lt->locker_tab[k], links, __db_locker)
__lock_dump_locker(dbenv, &mb, lt, lip);
- }
}
if (LF_ISSET(DB_STAT_ALL | DB_STAT_LOCK_OBJECTS)) {
__db_msg(dbenv, "%s", DB_GLOBAL(db_line));
__db_msg(dbenv, "Locks grouped by object:");
__lock_print_header(dbenv);
- for (k = 0; k < lrp->object_t_size; k++) {
- for (op = SH_TAILQ_FIRST(&lt->obj_tab[k], __db_lockobj);
- op != NULL;
- op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
+ for (k = 0; k < lrp->object_t_size; k++)
+ SH_TAILQ_FOREACH(
+ op, &lt->obj_tab[k], links, __db_lockobj) {
__lock_dump_object(lt, &mb, op);
__db_msg(dbenv, "%s", "");
}
- }
}
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
return (0);
}
@@ -354,34 +338,45 @@ __lock_dump_locker(dbenv, mbp, lt, lip)
DB_LOCKER *lip;
{
struct __db_lock *lp;
- time_t s;
- char buf[64];
+ char buf[DB_THREADID_STRLEN];
__db_msgadd(dbenv,
- mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d",
- (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites);
+ mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d pid/thread %s",
+ (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites,
+ dbenv->thread_id_string(dbenv, lip->pid, lip->tid, buf));
__db_msgadd(
dbenv, mbp, "%s", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " ");
if (LOCK_TIME_ISVALID(&lip->tx_expire)) {
- s = (time_t)lip->tx_expire.tv_sec;
- if (strftime(buf,
- sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)) != 0)
+#ifdef HAVE_STRFTIME
+ time_t t = (time_t)lip->tx_expire.tv_sec;
+ char tbuf[64];
+ if (strftime(tbuf, sizeof(tbuf),
+ "%m-%d-%H:%M:%S", localtime(&t)) != 0)
__db_msgadd(dbenv, mbp, "expires %s.%lu",
- buf, (u_long)lip->tx_expire.tv_usec);
+ tbuf, (u_long)lip->tx_expire.tv_usec);
+ else
+#endif
+ __db_msgadd(dbenv, mbp, "expires %lu",
+ (u_long)lip->tx_expire.tv_usec);
}
if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
__db_msgadd(dbenv, mbp, " lk timeout %u", lip->lk_timeout);
if (LOCK_TIME_ISVALID(&lip->lk_expire)) {
- s = (time_t)lip->lk_expire.tv_sec;
- if (strftime(buf,
- sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s)) != 0)
+#ifdef HAVE_STRFTIME
+ time_t t = (time_t)lip->lk_expire.tv_sec;
+ char tbuf[64];
+ if (strftime(tbuf,
+ sizeof(tbuf), "%m-%d-%H:%M:%S", localtime(&t)) != 0)
__db_msgadd(dbenv, mbp, " lk expires %s.%lu",
- buf, (u_long)lip->lk_expire.tv_usec);
+ tbuf, (u_long)lip->lk_expire.tv_usec);
+ else
+#endif
+ __db_msgadd(dbenv, mbp, " lk expires %lu",
+ (u_long)lip->lk_expire.tv_usec);
}
DB_MSGBUF_FLUSH(dbenv, mbp);
- for (lp = SH_LIST_FIRST(&lip->heldby, __db_lock);
- lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
+ SH_LIST_FOREACH(lp, &lip->heldby, locker_links, __db_lock)
__lock_printlock(lt, mbp, lp, 1);
}
@@ -393,15 +388,9 @@ __lock_dump_object(lt, mbp, op)
{
struct __db_lock *lp;
- for (lp =
- SH_TAILQ_FIRST(&op->holders, __db_lock);
- lp != NULL;
- lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ SH_TAILQ_FOREACH(lp, &op->holders, links, __db_lock)
__lock_printlock(lt, mbp, lp, 1);
- for (lp =
- SH_TAILQ_FIRST(&op->waiters, __db_lock);
- lp != NULL;
- lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ SH_TAILQ_FOREACH(lp, &op->waiters, links, __db_lock)
__lock_printlock(lt, mbp, lp, 1);
}
@@ -447,9 +436,6 @@ __lock_printlock(lt, mbp, lp, ispgno)
}
switch (lp->mode) {
- case DB_LOCK_DIRTY:
- mode = "DIRTY_READ";
- break;
case DB_LOCK_IREAD:
mode = "IREAD";
break;
@@ -465,6 +451,9 @@ __lock_printlock(lt, mbp, lp, ispgno)
case DB_LOCK_READ:
mode = "READ";
break;
+ case DB_LOCK_READ_UNCOMMITTED:
+ mode = "READ_UNCOMMITTED";
+ break;
case DB_LOCK_WRITE:
mode = "WRITE";
break;
@@ -491,9 +480,6 @@ __lock_printlock(lt, mbp, lp, ispgno)
case DB_LSTAT_HELD:
status = "HELD";
break;
- case DB_LSTAT_NOTEXIST:
- status = "NOTEXIST";
- break;
case DB_LSTAT_PENDING:
status = "PENDING";
break;
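[Editor's note] The stat-printing hunks above now guard the timestamp formatting with HAVE_STRFTIME and fall back to printing only the microsecond field when strftime is unavailable or fails. A self-contained sketch of that pattern (print_timeout is a hypothetical helper, not a BDB function):

#include <stdio.h>
#include <time.h>

static void
print_timeout(long sec, unsigned long usec)
{
#ifdef HAVE_STRFTIME
	time_t t = (time_t)sec;
	char tbuf[64];

	/* Preferred form: broken-down local time plus microseconds. */
	if (strftime(tbuf, sizeof(tbuf), "%m-%d-%H:%M:%S", localtime(&t)) != 0) {
		printf("next_timeout: %s.%lu\n", tbuf, usec);
		return;
	}
#else
	(void)sec;
#endif
	/* Fallback, as in the patched code: microseconds only. */
	printf("next_timeout: %lu\n", usec);
}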
diff --git a/db/lock/lock_timer.c b/db/lock/lock_timer.c
index 55efb6c6c..11ccd3036 100644
--- a/db/lock/lock_timer.c
+++ b/db/lock/lock_timer.c
@@ -1,23 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_timer.c,v 11.141 2004/03/24 20:51:39 bostic Exp $
+ * $Id: lock_timer.c,v 12.6 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#include <stdlib.h>
-#endif
-
#include "db_int.h"
-#include "dbinc/db_shash.h"
#include "dbinc/lock.h"
/*
@@ -38,14 +30,11 @@ __lock_set_timeout(dbenv, locker, timeout, op)
db_timeout_t timeout;
u_int32_t op;
{
- DB_LOCKTAB *lt;
int ret;
- lt = dbenv->lk_handle;
-
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
ret = __lock_set_timeout_internal(dbenv, locker, timeout, op);
- UNLOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
@@ -127,7 +116,7 @@ __lock_inherit_timeout(dbenv, parent, locker)
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
ret = 0;
- LOCKREGION(dbenv, lt);
+ LOCK_SYSTEM_LOCK(dbenv);
/* If the parent does not exist, we are done. */
LOCKER_LOCK(lt, region, parent, locker_ndx);
@@ -162,8 +151,7 @@ __lock_inherit_timeout(dbenv, parent, locker)
}
done:
-err:
- UNLOCKREGION(dbenv, lt);
+err: LOCK_SYSTEM_UNLOCK(dbenv);
return (ret);
}
diff --git a/db/lock/lock_util.c b/db/lock/lock_util.c
index 0c38d72ac..be5d18626 100644
--- a/db/lock/lock_util.c
+++ b/db/lock/lock_util.c
@@ -1,60 +1,20 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996-2004
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1996-2006
+ * Oracle Corporation. All rights reserved.
*
- * $Id: lock_util.c,v 11.12 2004/09/22 03:48:29 bostic Exp $
+ * $Id: lock_util.c,v 12.7 2006/08/24 14:46:11 bostic Exp $
*/
#include "db_config.h"
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
#include "db_int.h"
#include "dbinc/db_page.h"
-#include "dbinc/db_shash.h"
#include "dbinc/hash.h"
#include "dbinc/lock.h"
/*
- * __lock_cmp --
- * This function is used to compare a DBT that is about to be entered
- * into a hash table with an object already in the hash table. Note
- * that it just returns true on equal and 0 on not-equal. Therefore
- * this function cannot be used as a sort function; its purpose is to
- * be used as a hash comparison function.
- *
- * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
- */
-int
-__lock_cmp(dbt, lock_obj)
- const DBT *dbt;
- DB_LOCKOBJ *lock_obj;
-{
- void *obj_data;
-
- obj_data = SH_DBT_PTR(&lock_obj->lockobj);
- return (dbt->size == lock_obj->lockobj.size &&
- memcmp(dbt->data, obj_data, dbt->size) == 0);
-}
-
-/*
- * PUBLIC: int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
- */
-int
-__lock_locker_cmp(locker, sh_locker)
- u_int32_t locker;
- DB_LOCKER *sh_locker;
-{
- return (locker == sh_locker->id);
-}
-
-/*
* The next two functions are the hash functions used to store objects in the
* lock hash tables. They are hashing the same items, but one (__lock_ohash)
* takes a DBT (used for hashing a parameter passed from the user) and the
@@ -121,21 +81,6 @@ __lock_lhash(lock_obj)
}
/*
- * __lock_locker_hash --
- * Hash function for entering lockers into the locker hash table.
- * Since these are simply 32-bit unsigned integers, just return
- * the locker value.
- *
- * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
- */
-u_int32_t
-__lock_locker_hash(locker)
- u_int32_t locker;
-{
- return (locker);
-}
-
-/*
* __lock_nomem --
* Report a lack of some resource.
*
@@ -146,6 +91,6 @@ __lock_nomem(dbenv, res)
DB_ENV *dbenv;
const char *res;
{
- __db_err(dbenv, "Lock table is out of available %s", res);
+ __db_errx(dbenv, "Lock table is out of available %s", res);
return (ENOMEM);
}
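[Editor's note] lock_util.c's remaining comment explains that __lock_ohash and __lock_lhash hash the same bytes, one starting from a user-supplied DBT and the other from the stored lock object. A sketch of that wrapper arrangement around a single byte-hash core (FNV-1a here is only a stand-in, BDB uses its own hash routine, and the struct layouts are simplified):

#include <stddef.h>
#include <stdint.h>

struct dbt_like   { void *data; uint32_t size; };	/* user-facing key, DBT-like */
struct stored_obj { void *data; uint32_t size; };	/* simplified DB_LOCKOBJ-ish record */

/* One hash core over raw bytes (FNV-1a, illustrative only). */
static uint32_t
hash_bytes(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t h = 2166136261u;

	while (len-- > 0) {
		h ^= *p++;
		h *= 16777619u;
	}
	return (h);
}

/* Two thin wrappers hashing the same bytes from different containers. */
static uint32_t hash_user_key(const struct dbt_like *k)     { return (hash_bytes(k->data, k->size)); }
static uint32_t hash_stored_obj(const struct stored_obj *o) { return (hash_bytes(o->data, o->size)); }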