author    Panu Matilainen <pmatilai@redhat.com>   2007-07-16 16:48:14 +0300
committer Panu Matilainen <pmatilai@redhat.com>   2007-07-16 16:48:14 +0300
commit    2cfd3012bfcb5c5c61bbaf662ef084e0ab789d79 (patch)
tree      e12ee52087506ac8c7a5eee83b17497d98df2d40 /db/mutex
parent    b754fe19fd387ca5fe8e7c00ddaa25c898fa192f (diff)
Update internal BDB to version 4.5.20
Diffstat (limited to 'db/mutex')
-rw-r--r--  db/mutex/README        |  58
-rw-r--r--  db/mutex/mut_alloc.c   | 230
-rw-r--r--  db/mutex/mut_failchk.c |  69
-rw-r--r--  db/mutex/mut_fcntl.c   | 163
-rw-r--r--  db/mutex/mut_method.c  | 286
-rw-r--r--  db/mutex/mut_pthread.c | 321
-rw-r--r--  db/mutex/mut_region.c  | 356
-rw-r--r--  db/mutex/mut_stat.c    | 450
-rw-r--r--  db/mutex/mut_tas.c     | 228
-rw-r--r--  db/mutex/mut_win32.c   | 240
-rw-r--r--  db/mutex/tm.c          | 938
-rw-r--r--  db/mutex/uts4_cc.s     |   6
12 files changed, 2512 insertions, 833 deletions
diff --git a/db/mutex/README b/db/mutex/README index 323c34f1e..23527586a 100644 --- a/db/mutex/README +++ b/db/mutex/README @@ -1,35 +1,37 @@ -# $Id: README,v 11.2 1999/11/21 18:12:48 bostic Exp $ +# $Id: README,v 12.1 2005/07/20 16:51:55 bostic Exp $ Note: this only applies to locking using test-and-set and fcntl calls, pthreads were added after this was written. -Resource locking routines: lock based on a db_mutex_t. All this gunk +Resource locking routines: lock based on a DB_MUTEX. All this gunk (including trying to make assembly code portable), is necessary because System V semaphores require system calls for uncontested locks and we don't want to make two system calls per resource lock. -First, this is how it works. The db_mutex_t structure contains a resource +First, this is how it works. The DB_MUTEX structure contains a resource test-and-set lock (tsl), a file offset, a pid for debugging and statistics information. -If HAVE_MUTEX_THREADS is defined (i.e. we know how to do test-and-sets -for this compiler/architecture combination), we try and lock the resource -tsl __os_spin() times. If we can't acquire the lock that way, we use a -system call to sleep for 1ms, 2ms, 4ms, etc. (The time is bounded at 1 -second, just in case.) Using the timer backoff means that there are two -assumptions: that locks are held for brief periods (never over system -calls or I/O) and that locks are not hotly contested. - -If HAVE_MUTEX_THREADS is not defined, i.e. we can't do test-and-sets, we -use a file descriptor to do byte locking on a file at a specified offset. -In this case, ALL of the locking is done in the kernel. Because file -descriptors are allocated per process, we have to provide the file -descriptor as part of the lock call. We still have to do timer backoff -because we need to be able to block ourselves, i.e. the lock manager -causes processes to wait by having the process acquire a mutex and then -attempting to re-acquire the mutex. There's no way to use kernel locking -to block yourself, i.e. if you hold a lock and attempt to re-acquire it, -the attempt will succeed. +If HAVE_MUTEX_FCNTL is NOT defined (that is, we know how to do +test-and-sets for this compiler/architecture combination), we try and +lock the resource tsl some number of times (based on the number of +processors). If we can't acquire the mutex that way, we use a system +call to sleep for 1ms, 2ms, 4ms, etc. (The time is bounded at 10ms for +mutexes backing logical locks and 25 ms for data structures, just in +case.) Using the timer backoff means that there are two assumptions: +that mutexes are held for brief periods (never over system calls or I/O) +and mutexes are not hotly contested. + +If HAVE_MUTEX_FCNTL is defined, we use a file descriptor to do byte +locking on a file at a specified offset. In this case, ALL of the +locking is done in the kernel. Because file descriptors are allocated +per process, we have to provide the file descriptor as part of the lock +call. We still have to do timer backoff because we need to be able to +block ourselves, that is, the lock manager causes processes to wait by +having the process acquire a mutex and then attempting to re-acquire the +mutex. There's no way to use kernel locking to block yourself, that is, +if you hold a lock and attempt to re-acquire it, the attempt will +succeed. Next, let's talk about why it doesn't work the way a reasonable person would think it should work. @@ -42,7 +44,7 @@ would wake any waiting processes up after releasing the lock. 
This would actually require both another tsl (call it the mutex tsl) and synchronization between the call that blocks in the kernel and the actual resource tsl. The mutex tsl would be used to protect accesses to the -db_mutex_t itself. Locking the mutex tsl would be done by a busy loop, +DB_MUTEX itself. Locking the mutex tsl would be done by a busy loop, which is safe because processes would never block holding that tsl (all they would do is try to obtain the resource tsl and set/check the wait count). The problem in this model is that the blocking call into the @@ -55,7 +57,7 @@ methods are sufficient to solve the problem. The problem with fcntl locking is that only the process that obtained the lock can release it. Remember, we want the normal state of the kernel -semaphore to be locked. So, if the creator of the db_mutex_t were to +semaphore to be locked. So, if the creator of the DB_MUTEX were to initialize the lock to "locked", then a second process locks the resource tsl, and then a third process needs to block, waiting for the resource tsl, when the second process wants to wake up the third process, it can't @@ -69,11 +71,11 @@ or using a different blocking offset depending on which process is holding the lock, but it gets complicated fairly quickly. I'm open to suggestions, but I'm not holding my breath. -Regardless, we use this form of locking when HAVE_SPINLOCKS is not -defined, (i.e. we're locking in the kernel) because it doesn't have the -limitations found in System V semaphores, and because the normal state of -the kernel object in that case is unlocked, so the process releasing the -lock is also the holder of the lock. +Regardless, we use this form of locking when we don't have any other +choice, because it doesn't have the limitations found in System V +semaphores, and because the normal state of the kernel object in that +case is unlocked, so the process releasing the lock is also the holder +of the lock. The System V semaphore design has a number of other limitations that make it inappropriate for this task. Namely: diff --git a/db/mutex/mut_alloc.c b/db/mutex/mut_alloc.c new file mode 100644 index 000000000..bfc453dc6 --- /dev/null +++ b/db/mutex/mut_alloc.c @@ -0,0 +1,230 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1999-2006 + * Oracle Corporation. All rights reserved. + * + * $Id: mut_alloc.c,v 12.15 2006/08/24 14:46:16 bostic Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" +#include "dbinc/mutex_int.h" + +/* + * __mutex_alloc -- + * Allocate a mutex from the mutex region. + * + * PUBLIC: int __mutex_alloc __P((DB_ENV *, int, u_int32_t, db_mutex_t *)); + */ +int +__mutex_alloc(dbenv, alloc_id, flags, indxp) + DB_ENV *dbenv; + int alloc_id; + u_int32_t flags; + db_mutex_t *indxp; +{ + int ret; + + /* The caller may depend on us to initialize. */ + *indxp = MUTEX_INVALID; + + /* + * If this is not an application lock, and we've turned off locking, + * or the DB_ENV handle isn't thread-safe, and this is a thread lock + * or the environment isn't multi-process by definition, there's no + * need to mutex at all. + */ + if (alloc_id != MTX_APPLICATION && + (F_ISSET(dbenv, DB_ENV_NOLOCKING) || + (!F_ISSET(dbenv, DB_ENV_THREAD) && + (LF_ISSET(DB_MUTEX_PROCESS_ONLY) || + F_ISSET(dbenv, DB_ENV_PRIVATE))))) + return (0); + + /* + * If we have a region in which to allocate the mutexes, lock it and + * do the allocation. 
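The README above describes the test-and-set strategy: try the tsl some bounded number of times, then sleep with an exponentially growing delay. Below is a minimal standalone sketch of that idea using C11 atomics; the spin count and backoff cap are illustrative assumptions, not the values Berkeley DB derives from the processor count.

#include <stdatomic.h>
#include <time.h>

#define SPIN_LIMIT     50     /* assumed spin count */
#define BACKOFF_CAP_MS 25     /* assumed cap on the sleep interval */

static void
sleep_ms(int ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	(void)nanosleep(&ts, NULL);
}

void
tsl_lock(atomic_flag *tsl)
{
	int spins, ms;

	for (ms = 1;;) {
		/* Spin on the test-and-set lock for a bounded number of tries. */
		for (spins = 0; spins < SPIN_LIMIT; ++spins)
			if (!atomic_flag_test_and_set(tsl))
				return;
		/* Back off: sleep 1ms, 2ms, 4ms, ... up to the cap, then retry. */
		sleep_ms(ms);
		if ((ms <<= 1) > BACKOFF_CAP_MS)
			ms = BACKOFF_CAP_MS;
	}
}

void
tsl_unlock(atomic_flag *tsl)
{
	atomic_flag_clear(tsl);
}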
+ */ + if (MUTEX_ON(dbenv)) + return (__mutex_alloc_int(dbenv, 1, alloc_id, flags, indxp)); + + /* + * We have to allocate some number of mutexes before we have a region + * in which to allocate them. We handle this by saving up the list of + * flags and allocating them as soon as we have a handle. + * + * The list of mutexes to alloc is maintained in pairs: first the + * alloc_id argument, second the flags passed in by the caller. + */ + if (dbenv->mutex_iq == NULL) { + dbenv->mutex_iq_max = 50; + if ((ret = __os_calloc(dbenv, dbenv->mutex_iq_max, + sizeof(dbenv->mutex_iq[0]), &dbenv->mutex_iq)) != 0) + return (ret); + } else if (dbenv->mutex_iq_next == dbenv->mutex_iq_max - 1) { + dbenv->mutex_iq_max *= 2; + if ((ret = __os_realloc(dbenv, + dbenv->mutex_iq_max * sizeof(dbenv->mutex_iq[0]), + &dbenv->mutex_iq)) != 0) + return (ret); + } + *indxp = dbenv->mutex_iq_next + 1; /* Correct for MUTEX_INVALID. */ + dbenv->mutex_iq[dbenv->mutex_iq_next].alloc_id = alloc_id; + dbenv->mutex_iq[dbenv->mutex_iq_next].flags = flags; + ++dbenv->mutex_iq_next; + + return (0); +} + +/* + * __mutex_alloc_int -- + * Internal routine to allocate a mutex. + * + * PUBLIC: int __mutex_alloc_int + * PUBLIC: __P((DB_ENV *, int, int, u_int32_t, db_mutex_t *)); + */ +int +__mutex_alloc_int(dbenv, locksys, alloc_id, flags, indxp) + DB_ENV *dbenv; + int locksys, alloc_id; + u_int32_t flags; + db_mutex_t *indxp; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + int ret; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + ret = 0; + + /* + * If we're not initializing the mutex region, then lock the region to + * allocate new mutexes. Drop the lock before initializing the mutex, + * mutex initialization may require a system call. + */ + if (locksys) + MUTEX_SYSTEM_LOCK(dbenv); + + if (mtxregion->mutex_next == MUTEX_INVALID) { + __db_errx(dbenv, + "unable to allocate memory for mutex; resize mutex region"); + if (locksys) + MUTEX_SYSTEM_UNLOCK(dbenv); + return (ENOMEM); + } + + *indxp = mtxregion->mutex_next; + mutexp = MUTEXP_SET(*indxp); + mtxregion->mutex_next = mutexp->mutex_next_link; + + --mtxregion->stat.st_mutex_free; + ++mtxregion->stat.st_mutex_inuse; + if (mtxregion->stat.st_mutex_inuse > mtxregion->stat.st_mutex_inuse_max) + mtxregion->stat.st_mutex_inuse_max = + mtxregion->stat.st_mutex_inuse; + if (locksys) + MUTEX_SYSTEM_UNLOCK(dbenv); + + /* Initialize the mutex. */ + memset(mutexp, 0, sizeof(*mutexp)); + F_SET(mutexp, DB_MUTEX_ALLOCATED | + LF_ISSET(DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_PROCESS_ONLY)); + + /* + * If the mutex is associated with a single process, set the process + * ID. If the application ever calls DbEnv::failchk, we'll need the + * process ID to know if the mutex is still in use. + */ + if (LF_ISSET(DB_MUTEX_PROCESS_ONLY)) + dbenv->thread_id(dbenv, &mutexp->pid, NULL); + +#ifdef HAVE_STATISTICS + mutexp->alloc_id = alloc_id; +#else + COMPQUIET(alloc_id, 0); +#endif + + if ((ret = __mutex_init(dbenv, *indxp, flags)) != 0) + (void)__mutex_free_int(dbenv, locksys, indxp); + + return (ret); +} + +/* + * __mutex_free -- + * Free a mutex. + * + * PUBLIC: int __mutex_free __P((DB_ENV *, db_mutex_t *)); + */ +int +__mutex_free(dbenv, indxp) + DB_ENV *dbenv; + db_mutex_t *indxp; +{ + /* + * There is no explicit ordering in how the regions are cleaned up + * up and/or discarded when an environment is destroyed (either a + * private environment is closed or a public environment is removed). 
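A simplified, hypothetical sketch of the mutex_iq bookkeeping shown above: before the mutex region exists, allocation requests are parked in a heap array that starts small and doubles when it fills, so they can be replayed once the region is created. Names and the initial size are illustrative only.

#include <stdlib.h>

struct mutex_req {
	int alloc_id;
	unsigned int flags;
};

struct req_queue {
	struct mutex_req *slots;
	size_t next, max;
};

int
queue_request(struct req_queue *q, int alloc_id, unsigned int flags)
{
	void *p;

	if (q->slots == NULL) {
		q->max = 50;    /* small initial allocation */
		if ((q->slots = calloc(q->max, sizeof(q->slots[0]))) == NULL)
			return (-1);
	} else if (q->next == q->max - 1) {
		q->max *= 2;    /* double when the array is nearly full */
		if ((p = realloc(q->slots, q->max * sizeof(q->slots[0]))) == NULL)
			return (-1);
		q->slots = p;
	}
	q->slots[q->next].alloc_id = alloc_id;
	q->slots[q->next].flags = flags;
	++q->next;
	return (0);
}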
+ * The way we deal with mutexes is to clean up all remaining mutexes + * when we close the mutex environment (because we have to be able to + * do that anyway, after a crash), which means we don't have to deal + * with region cleanup ordering on normal environment destruction. + * All that said, what it really means is we can get here without a + * mpool region. It's OK, the mutex has been, or will be, destroyed. + * + * If the mutex has never been configured, we're done. + */ + if (!MUTEX_ON(dbenv) || *indxp == MUTEX_INVALID) + return (0); + + return (__mutex_free_int(dbenv, 1, indxp)); +} + +/* + * __mutex_free_int -- + * Internal routine to free a mutex. + * + * PUBLIC: int __mutex_free_int __P((DB_ENV *, int, db_mutex_t *)); + */ +int +__mutex_free_int(dbenv, locksys, indxp) + DB_ENV *dbenv; + int locksys; + db_mutex_t *indxp; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + db_mutex_t mutex; + int ret; + + mutex = *indxp; + *indxp = MUTEX_INVALID; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + + DB_ASSERT(dbenv, F_ISSET(mutexp, DB_MUTEX_ALLOCATED)); + F_CLR(mutexp, DB_MUTEX_ALLOCATED); + + ret = __mutex_destroy(dbenv, mutex); + + if (locksys) + MUTEX_SYSTEM_LOCK(dbenv); + + /* Link the mutex on the head of the free list. */ + mutexp->mutex_next_link = mtxregion->mutex_next; + mtxregion->mutex_next = mutex; + ++mtxregion->stat.st_mutex_free; + --mtxregion->stat.st_mutex_inuse; + + if (locksys) + MUTEX_SYSTEM_UNLOCK(dbenv); + + return (ret); +} diff --git a/db/mutex/mut_failchk.c b/db/mutex/mut_failchk.c new file mode 100644 index 000000000..3d9d46a98 --- /dev/null +++ b/db/mutex/mut_failchk.c @@ -0,0 +1,69 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2005-2006 + * Oracle Corporation. All rights reserved. + * + * $Id: mut_failchk.c,v 12.3 2006/08/24 14:46:16 bostic Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" +#include "dbinc/mutex_int.h" + +/* + * __mut_failchk -- + * Check for mutexes held by dead processes. + * + * PUBLIC: int __mut_failchk __P((DB_ENV *)); + */ +int +__mut_failchk(dbenv) + DB_ENV *dbenv; +{ + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + DB_MUTEX *mutexp; + db_mutex_t i; + int ret; + char buf[DB_THREADID_STRLEN]; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + ret = 0; + + MUTEX_SYSTEM_LOCK(dbenv); + for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) { + mutexp = MUTEXP_SET(i); + + /* + * We're looking for per-process mutexes where the process + * has died. + */ + if (!F_ISSET(mutexp, DB_MUTEX_ALLOCATED) || + !F_ISSET(mutexp, DB_MUTEX_PROCESS_ONLY)) + continue; + + /* + * The thread that allocated the mutex may have exited, but + * we cannot reclaim the mutex if the process is still alive. + */ + if (dbenv->is_alive( + dbenv, mutexp->pid, 0, DB_MUTEX_PROCESS_ONLY)) + continue; + + __db_msg(dbenv, "Freeing mutex for process: %s", + dbenv->thread_id_string(dbenv, mutexp->pid, 0, buf)); + + /* Unlock and free the mutex. */ + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) + MUTEX_UNLOCK(dbenv, i); + + if ((ret = __mutex_free_int(dbenv, 0, &i)) != 0) + break; + } + MUTEX_SYSTEM_UNLOCK(dbenv); + + return (ret); +} diff --git a/db/mutex/mut_fcntl.c b/db/mutex/mut_fcntl.c index 03521bd77..eb4c6ef7f 100644 --- a/db/mutex/mut_fcntl.c +++ b/db/mutex/mut_fcntl.c @@ -1,64 +1,32 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 - * Sleepycat Software. 
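The allocator above hands out mutexes from the shared region by popping slots off a free list (__mutex_alloc_int) and pushes them back on the head when they are released (__mutex_free_int). Here is a self-contained sketch of that free-list discipline with hypothetical types; slot 0 is reserved as the invalid index, mirroring MUTEX_INVALID.

#include <stddef.h>

#define SLOT_INVALID 0

struct slot {
	size_t next_link;       /* next free slot, or SLOT_INVALID */
	int in_use;
};

struct slot_region {
	struct slot *slots;     /* slots[0] is never handed out */
	size_t free_head;       /* head of the free list */
};

size_t
slot_alloc(struct slot_region *r)
{
	size_t indx;

	if ((indx = r->free_head) == SLOT_INVALID)
		return (SLOT_INVALID);          /* region is full */
	r->free_head = r->slots[indx].next_link;
	r->slots[indx].in_use = 1;
	return (indx);
}

void
slot_free(struct slot_region *r, size_t indx)
{
	/* Push the slot back on the head of the free list. */
	r->slots[indx].in_use = 0;
	r->slots[indx].next_link = r->free_head;
	r->free_head = indx;
}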
All rights reserved. + * Copyright (c) 1996-2006 + * Oracle Corporation. All rights reserved. * - * $Id: mut_fcntl.c,v 11.26 2004/01/28 03:36:18 bostic Exp $ + * $Id: mut_fcntl.c,v 12.20 2006/08/24 14:46:16 bostic Exp $ */ #include "db_config.h" -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <fcntl.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> /* SEEK_SET on SunOS. */ -#endif - #include "db_int.h" +#include "dbinc/mutex_int.h" /* * __db_fcntl_mutex_init -- - * Initialize a DB mutex structure. + * Initialize a fcntl mutex. * - * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t)); + * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t)); */ int -__db_fcntl_mutex_init(dbenv, mutexp, offset) +__db_fcntl_mutex_init(dbenv, mutex, flags) DB_ENV *dbenv; - DB_MUTEX *mutexp; - u_int32_t offset; + db_mutex_t mutex; + u_int32_t flags; { - u_int32_t save; - - /* - * The only setting/checking of the MUTEX_MPOOL flag is in the mutex - * mutex allocation code (__db_mutex_alloc/free). Preserve only that - * flag. This is safe because even if this flag was never explicitly - * set, but happened to be set in memory, it will never be checked or - * acted upon. - */ - save = F_ISSET(mutexp, MUTEX_MPOOL); - memset(mutexp, 0, sizeof(*mutexp)); - F_SET(mutexp, save); - - /* - * This is where we decide to ignore locks we don't need to set -- if - * the application is private, we don't need any locks. - */ - if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { - F_SET(mutexp, MUTEX_IGNORE); - return (0); - } - - mutexp->off = offset; -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - mutexp->reg_off = INVALID_ROFF; -#endif - F_SET(mutexp, MUTEX_INITED); + COMPQUIET(dbenv, NULL); + COMPQUIET(mutex, MUTEX_INVALID); + COMPQUIET(flags, 0); return (0); } @@ -67,32 +35,45 @@ __db_fcntl_mutex_init(dbenv, mutexp, offset) * __db_fcntl_mutex_lock * Lock on a mutex, blocking if necessary. * - * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, db_mutex_t)); */ int -__db_fcntl_mutex_lock(dbenv, mutexp) +__db_fcntl_mutex_lock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; struct flock k_lock; - int locked, ms, waited; + int locked, ms, ret; - if (F_ISSET(dbenv, DB_ENV_NOLOCKING)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + +#ifdef HAVE_STATISTICS + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) + ++mutexp->mutex_set_wait; + else + ++mutexp->mutex_set_nowait; +#endif + /* Initialize the lock. */ k_lock.l_whence = SEEK_SET; - k_lock.l_start = mutexp->off; + k_lock.l_start = mutex; k_lock.l_len = 1; - for (locked = waited = 0;;) { + for (locked = 0;;) { /* * Wait for the lock to become available; wait 1ms initially, * up to 1 second. */ - for (ms = 1; mutexp->pid != 0;) { - waited = 1; - __os_yield(NULL, ms * USEC_PER_MS); + for (ms = 1; F_ISSET(mutexp, DB_MUTEX_LOCKED);) { + __os_sleep(NULL, 0, ms * USEC_PER_MS); if ((ms <<= 1) > MS_PER_SEC) ms = MS_PER_SEC; } @@ -100,18 +81,21 @@ __db_fcntl_mutex_lock(dbenv, mutexp) /* Acquire an exclusive kernel lock. */ k_lock.l_type = F_WRLCK; if (fcntl(dbenv->lockfhp->fd, F_SETLKW, &k_lock)) - return (__os_get_errno()); + goto err; /* If the resource is still available, it's ours. 
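__db_fcntl_mutex_lock above maps each mutex to a single byte in a backing file and lets the kernel serialize access via fcntl(2) byte-range locks. A minimal standalone sketch of that primitive follows; the file descriptor and offset are assumptions supplied by the caller.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
byte_lock(int fd, off_t offset, int acquire)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_whence = SEEK_SET;
	fl.l_start = offset;            /* one byte per mutex */
	fl.l_len = 1;
	fl.l_type = acquire ? F_WRLCK : F_UNLCK;

	/* F_SETLKW blocks in the kernel until the byte can be write-locked. */
	return (fcntl(fd, acquire ? F_SETLKW : F_SETLK, &fl) == -1 ? -1 : 0);
}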
*/ - if (mutexp->pid == 0) { + if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) { locked = 1; - __os_id(&mutexp->pid); + + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); } /* Release the kernel lock. */ k_lock.l_type = F_UNLCK; if (fcntl(dbenv->lockfhp->fd, F_SETLK, &k_lock)) - return (__os_get_errno()); + goto err; /* * If we got the resource lock we're done. @@ -126,57 +110,74 @@ __db_fcntl_mutex_lock(dbenv, mutexp) break; } - if (waited) - ++mutexp->mutex_set_wait; - else - ++mutexp->mutex_set_nowait; +#ifdef DIAGNOSTIC + /* + * We want to switch threads as often as possible. Yield every time + * we get a mutex to ensure contention. + */ + if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) + __os_yield(dbenv); +#endif return (0); + +err: ret = __os_get_syserr(); + __db_syserr(dbenv, ret, "fcntl lock failed"); + return (__db_panic(dbenv, __os_posix_err(ret))); } /* * __db_fcntl_mutex_unlock -- - * Release a lock. + * Release a mutex. * - * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, db_mutex_t)); */ int -__db_fcntl_mutex_unlock(dbenv, mutexp) +__db_fcntl_mutex_unlock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { - if (F_ISSET(dbenv, DB_ENV_NOLOCKING)) + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + #ifdef DIAGNOSTIC -#define MSG "mutex_unlock: ERROR: released lock that was unlocked\n" -#ifndef STDERR_FILENO -#define STDERR_FILENO 2 -#endif - if (mutexp->pid == 0) - write(STDERR_FILENO, MSG, sizeof(MSG) - 1); + if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + __db_errx(dbenv, "fcntl unlock failed: lock already unlocked"); + return (__db_panic(dbenv, EACCES)); + } #endif /* * Release the resource. We don't have to acquire any locks because - * processes trying to acquire the lock are checking for a pid set to - * 0/non-0, not to any specific value. + * processes trying to acquire the lock are waiting for the flag to + * go to 0. Once that happens the waiters will serialize acquiring + * an exclusive kernel lock before locking the mutex. */ - mutexp->pid = 0; + F_CLR(mutexp, DB_MUTEX_LOCKED); return (0); } /* * __db_fcntl_mutex_destroy -- - * Destroy a DB_MUTEX. + * Destroy a mutex. * - * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_MUTEX *)); + * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_ENV *, db_mutex_t)); */ int -__db_fcntl_mutex_destroy(mutexp) - DB_MUTEX *mutexp; +__db_fcntl_mutex_destroy(dbenv, mutex) + DB_ENV *dbenv; + db_mutex_t mutex; { - COMPQUIET(mutexp, NULL); + COMPQUIET(dbenv, NULL); + COMPQUIET(mutex, MUTEX_INVALID); return (0); } diff --git a/db/mutex/mut_method.c b/db/mutex/mut_method.c new file mode 100644 index 000000000..08da11d94 --- /dev/null +++ b/db/mutex/mut_method.c @@ -0,0 +1,286 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2006 + * Oracle Corporation. All rights reserved. + * + * $Id: mut_method.c,v 12.12 2006/08/24 14:46:16 bostic Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" +#include "dbinc/mutex_int.h" + +/* + * __mutex_alloc_pp -- + * Allocate a mutex, application method. 
+ * + * PUBLIC: int __mutex_alloc_pp __P((DB_ENV *, u_int32_t, db_mutex_t *)); + */ +int +__mutex_alloc_pp(dbenv, flags, indxp) + DB_ENV *dbenv; + u_int32_t flags; + db_mutex_t *indxp; +{ + DB_THREAD_INFO *ip; + int ret; + + PANIC_CHECK(dbenv); + + switch (flags) { + case 0: + case DB_MUTEX_PROCESS_ONLY: + case DB_MUTEX_SELF_BLOCK: + break; + default: + return (__db_ferr(dbenv, "DB_ENV->mutex_alloc", 0)); + } + + ENV_ENTER(dbenv, ip); + ret = __mutex_alloc(dbenv, MTX_APPLICATION, flags, indxp); + ENV_LEAVE(dbenv, ip); + + return (ret); +} + +/* + * __mutex_free_pp -- + * Destroy a mutex, application method. + * + * PUBLIC: int __mutex_free_pp __P((DB_ENV *, db_mutex_t)); + */ +int +__mutex_free_pp(dbenv, indx) + DB_ENV *dbenv; + db_mutex_t indx; +{ + DB_THREAD_INFO *ip; + int ret; + + PANIC_CHECK(dbenv); + + if (indx == MUTEX_INVALID) + return (EINVAL); + + /* + * Internally Berkeley DB passes around the db_mutex_t address on + * free, because we want to make absolutely sure the slot gets + * overwritten with MUTEX_INVALID. We don't export MUTEX_INVALID, + * so we don't export that part of the API, either. + */ + ENV_ENTER(dbenv, ip); + ret = __mutex_free(dbenv, &indx); + ENV_LEAVE(dbenv, ip); + + return (ret); +} + +/* + * __mutex_lock -- + * Lock a mutex, application method. + * + * PUBLIC: int __mutex_lock_pp __P((DB_ENV *, db_mutex_t)); + */ +int +__mutex_lock_pp(dbenv, indx) + DB_ENV *dbenv; + db_mutex_t indx; +{ + PANIC_CHECK(dbenv); + + if (indx == MUTEX_INVALID) + return (EINVAL); + + return (__mutex_lock(dbenv, indx)); +} + +/* + * __mutex_unlock -- + * Unlock a mutex, application method. + * + * PUBLIC: int __mutex_unlock_pp __P((DB_ENV *, db_mutex_t)); + */ +int +__mutex_unlock_pp(dbenv, indx) + DB_ENV *dbenv; + db_mutex_t indx; +{ + PANIC_CHECK(dbenv); + + if (indx == MUTEX_INVALID) + return (EINVAL); + + return (__mutex_unlock(dbenv, indx)); +} + +/* + * __mutex_get_align -- + * DB_ENV->mutex_get_align. + * + * PUBLIC: int __mutex_get_align __P((DB_ENV *, u_int32_t *)); + */ +int +__mutex_get_align(dbenv, alignp) + DB_ENV *dbenv; + u_int32_t *alignp; +{ + if (MUTEX_ON(dbenv)) + *alignp = ((DB_MUTEXREGION *) + dbenv->mutex_handle->reginfo.primary)->stat.st_mutex_align; + else + *alignp = dbenv->mutex_align; + return (0); +} + +/* + * __mutex_set_align -- + * DB_ENV->mutex_set_align. + * + * PUBLIC: int __mutex_set_align __P((DB_ENV *, u_int32_t)); + */ +int +__mutex_set_align(dbenv, align) + DB_ENV *dbenv; + u_int32_t align; +{ + ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_align"); + + if (align == 0 || !POWER_OF_TWO(align)) { + __db_errx(dbenv, + "DB_ENV->mutex_set_align: alignment value must be a non-zero power-of-two"); + return (EINVAL); + } + + dbenv->mutex_align = align; + return (0); +} + +/* + * __mutex_get_increment -- + * DB_ENV->mutex_get_increment. + * + * PUBLIC: int __mutex_get_increment __P((DB_ENV *, u_int32_t *)); + */ +int +__mutex_get_increment(dbenv, incrementp) + DB_ENV *dbenv; + u_int32_t *incrementp; +{ + /* + * We don't maintain the increment in the region (it just makes + * no sense). Return whatever we have configured on this handle, + * nobody is ever going to notice. + */ + *incrementp = dbenv->mutex_inc; + return (0); +} + +/* + * __mutex_set_increment -- + * DB_ENV->mutex_set_increment. 
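The comment in __mutex_free_pp above explains why the internal free path takes the address of the handle: the caller's copy must always be overwritten with the invalid value. A small illustrative sketch of that idiom, with hypothetical names:

#include <stddef.h>

#define HANDLE_INVALID 0

typedef size_t handle_t;

int
handle_free(handle_t *hp)
{
	handle_t h;

	h = *hp;
	*hp = HANDLE_INVALID;   /* the caller's copy can never dangle */
	if (h == HANDLE_INVALID)
		return (0);
	/* ... release the underlying slot for h here ... */
	return (0);
}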
+ * + * PUBLIC: int __mutex_set_increment __P((DB_ENV *, u_int32_t)); + */ +int +__mutex_set_increment(dbenv, increment) + DB_ENV *dbenv; + u_int32_t increment; +{ + ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_increment"); + + dbenv->mutex_cnt = 0; + dbenv->mutex_inc = increment; + return (0); +} + +/* + * __mutex_get_max -- + * DB_ENV->mutex_get_max. + * + * PUBLIC: int __mutex_get_max __P((DB_ENV *, u_int32_t *)); + */ +int +__mutex_get_max(dbenv, maxp) + DB_ENV *dbenv; + u_int32_t *maxp; +{ + if (MUTEX_ON(dbenv)) + *maxp = ((DB_MUTEXREGION *) + dbenv->mutex_handle->reginfo.primary)->stat.st_mutex_cnt; + else + *maxp = dbenv->mutex_cnt; + return (0); +} + +/* + * __mutex_set_max -- + * DB_ENV->mutex_set_max. + * + * PUBLIC: int __mutex_set_max __P((DB_ENV *, u_int32_t)); + */ +int +__mutex_set_max(dbenv, max) + DB_ENV *dbenv; + u_int32_t max; +{ + ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->set_mutex_max"); + + dbenv->mutex_cnt = max; + dbenv->mutex_inc = 0; + return (0); +} + +/* + * __mutex_get_tas_spins -- + * DB_ENV->mutex_get_tas_spins. + * + * PUBLIC: int __mutex_get_tas_spins __P((DB_ENV *, u_int32_t *)); + */ +int +__mutex_get_tas_spins(dbenv, tas_spinsp) + DB_ENV *dbenv; + u_int32_t *tas_spinsp; +{ + if (MUTEX_ON(dbenv)) + *tas_spinsp = ((DB_MUTEXREGION *)dbenv-> + mutex_handle->reginfo.primary)->stat.st_mutex_tas_spins; + else + *tas_spinsp = dbenv->mutex_tas_spins; + return (0); +} + +/* + * __mutex_set_tas_spins -- + * DB_ENV->mutex_set_tas_spins. + * + * PUBLIC: int __mutex_set_tas_spins __P((DB_ENV *, u_int32_t)); + */ +int +__mutex_set_tas_spins(dbenv, tas_spins) + DB_ENV *dbenv; + u_int32_t tas_spins; +{ + /* + * Bound the value -- less than 1 makes no sense, greater than 1M + * makes no sense. + */ + if (tas_spins == 0) + tas_spins = 1; + else if (tas_spins > 1000000) + tas_spins = 1000000; + + /* + * There's a theoretical race here, but I'm not interested in locking + * the test-and-set spin count. The worst possibility is a thread + * reads out a bad spin count and spins until it gets the lock, but + * that's awfully unlikely. + */ + if (MUTEX_ON(dbenv)) + ((DB_MUTEXREGION *)dbenv->mutex_handle + ->reginfo.primary)->stat.st_mutex_tas_spins = tas_spins; + else + dbenv->mutex_tas_spins = tas_spins; + return (0); +} diff --git a/db/mutex/mut_pthread.c b/db/mutex/mut_pthread.c index 6507eba73..08c7b388b 100644 --- a/db/mutex/mut_pthread.c +++ b/db/mutex/mut_pthread.c @@ -1,32 +1,16 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1999-2004 - * Sleepycat Software. All rights reserved. + * Copyright (c) 1999-2006 + * Oracle Corporation. All rights reserved. * - * $Id: mut_pthread.c,v 11.62 2004/09/22 16:27:05 bostic Exp $ + * $Id: mut_pthread.c,v 12.19 2006/08/24 14:46:16 bostic Exp $ */ #include "db_config.h" -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <string.h> -#include <unistd.h> -#endif - #include "db_int.h" - -#ifdef DIAGNOSTIC -#undef MSG1 -#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n" -#undef MSG2 -#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n" -#ifndef STDERR_FILENO -#define STDERR_FILENO 2 -#endif -#endif +#include "dbinc/mutex_int.h" #ifdef HAVE_MUTEX_SOLARIS_LWP #define pthread_cond_destroy(x) 0 @@ -36,12 +20,6 @@ #define pthread_mutex_lock _lwp_mutex_lock #define pthread_mutex_trylock _lwp_mutex_trylock #define pthread_mutex_unlock _lwp_mutex_unlock -/* - * !!! - * _lwp_self returns the LWP process ID which isn't a unique per-thread - * identifier. 
Use pthread_self instead, it appears to work even if we - * are not a pthreads application. - */ #endif #ifdef HAVE_MUTEX_UI_THREADS #define pthread_cond_destroy(x) cond_destroy @@ -51,89 +29,78 @@ #define pthread_mutex_lock mutex_lock #define pthread_mutex_trylock mutex_trylock #define pthread_mutex_unlock mutex_unlock -#define pthread_self thr_self #endif #define PTHREAD_UNLOCK_ATTEMPTS 5 /* + * IBM's MVS pthread mutex implementation returns -1 and sets errno rather than + * returning errno itself. As -1 is not a valid errno value, assume functions + * returning -1 have set errno. If they haven't, return a random error value. + */ +#define RET_SET(f, ret) do { \ + if (((ret) = (f)) == -1 && ((ret) = errno) == 0) \ + (ret) = EAGAIN; \ +} while (0) + +/* * __db_pthread_mutex_init -- - * Initialize a DB_MUTEX. + * Initialize a pthread mutex. * - * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t)); + * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t)); */ int -__db_pthread_mutex_init(dbenv, mutexp, flags) +__db_pthread_mutex_init(dbenv, mutex, flags) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; u_int32_t flags; { - u_int32_t save; + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; int ret; + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); ret = 0; - /* - * The only setting/checking of the MUTEX_MPOOL flag is in the mutex - * mutex allocation code (__db_mutex_alloc/free). Preserve only that - * flag. This is safe because even if this flag was never explicitly - * set, but happened to be set in memory, it will never be checked or - * acted upon. - */ - save = F_ISSET(mutexp, MUTEX_MPOOL); - memset(mutexp, 0, sizeof(*mutexp)); - F_SET(mutexp, save); - - /* - * If this is a thread lock or the process has told us that there are - * no other processes in the environment, use thread-only locks, they - * are faster in some cases. - * - * This is where we decide to ignore locks we don't need to set -- if - * the application isn't threaded, there aren't any threads to block. 
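The RET_SET() macro above exists because some pthread implementations (the MVS case is cited) return -1 and set errno instead of returning the error code directly. A usage sketch of the same normalization, written as a plain wrapper function:

#include <errno.h>
#include <pthread.h>

int
checked_mutex_lock(pthread_mutex_t *m)
{
	int ret;

	/* Fold the -1/errno convention into a plain errno-style return. */
	if ((ret = pthread_mutex_lock(m)) == -1 && (ret = errno) == 0)
		ret = EAGAIN;   /* no errno either: pick a fallback value */
	return (ret);
}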
- */ - if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) { - if (!F_ISSET(dbenv, DB_ENV_THREAD)) { - F_SET(mutexp, MUTEX_IGNORE); - return (0); - } - } - #ifdef HAVE_MUTEX_PTHREADS { pthread_condattr_t condattr, *condattrp = NULL; pthread_mutexattr_t mutexattr, *mutexattrp = NULL; - if (!LF_ISSET(MUTEX_THREAD)) { - ret = pthread_mutexattr_init(&mutexattr); + if (!LF_ISSET(DB_MUTEX_PROCESS_ONLY)) { + RET_SET((pthread_mutexattr_init(&mutexattr)), ret); #ifndef HAVE_MUTEX_THREAD_ONLY if (ret == 0) - ret = pthread_mutexattr_setpshared( - &mutexattr, PTHREAD_PROCESS_SHARED); + RET_SET((pthread_mutexattr_setpshared( + &mutexattr, PTHREAD_PROCESS_SHARED)), ret); #endif mutexattrp = &mutexattr; } if (ret == 0) - ret = pthread_mutex_init(&mutexp->mutex, mutexattrp); + RET_SET((pthread_mutex_init(&mutexp->mutex, mutexattrp)), ret); if (mutexattrp != NULL) - pthread_mutexattr_destroy(mutexattrp); - if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) { - if (!LF_ISSET(MUTEX_THREAD)) { - ret = pthread_condattr_init(&condattr); -#ifndef HAVE_MUTEX_THREAD_ONLY + (void)pthread_mutexattr_destroy(mutexattrp); + if (ret == 0 && LF_ISSET(DB_MUTEX_SELF_BLOCK)) { + if (!LF_ISSET(DB_MUTEX_PROCESS_ONLY)) { + RET_SET((pthread_condattr_init(&condattr)), ret); if (ret == 0) { condattrp = &condattr; - ret = pthread_condattr_setpshared( - &condattr, PTHREAD_PROCESS_SHARED); - } +#ifndef HAVE_MUTEX_THREAD_ONLY + RET_SET((pthread_condattr_setpshared( + &condattr, PTHREAD_PROCESS_SHARED)), ret); #endif + } } if (ret == 0) - ret = pthread_cond_init(&mutexp->cond, condattrp); + RET_SET( + (pthread_cond_init(&mutexp->cond, condattrp)), ret); - F_SET(mutexp, MUTEX_SELF_BLOCK); + F_SET(mutexp, DB_MUTEX_SELF_BLOCK); if (condattrp != NULL) (void)pthread_condattr_destroy(condattrp); } @@ -149,7 +116,7 @@ __db_pthread_mutex_init(dbenv, mutexp, flags) * initialization values doesn't have surrounding braces. There's not * much we can do. */ - if (LF_ISSET(MUTEX_THREAD)) { + if (LF_ISSET(DB_MUTEX_PROCESS_ONLY)) { static lwp_mutex_t mi = DEFAULTMUTEX; mutexp->mutex = mi; @@ -158,8 +125,8 @@ __db_pthread_mutex_init(dbenv, mutexp, flags) mutexp->mutex = mi; } - if (LF_ISSET(MUTEX_SELF_BLOCK)) { - if (LF_ISSET(MUTEX_THREAD)) { + if (LF_ISSET(DB_MUTEX_SELF_BLOCK)) { + if (LF_ISSET(DB_MUTEX_PROCESS_ONLY)) { static lwp_cond_t ci = DEFAULTCV; mutexp->cond = ci; @@ -168,63 +135,74 @@ __db_pthread_mutex_init(dbenv, mutexp, flags) mutexp->cond = ci; } - F_SET(mutexp, MUTEX_SELF_BLOCK); + F_SET(mutexp, DB_MUTEX_SELF_BLOCK); } #endif #ifdef HAVE_MUTEX_UI_THREADS { int type; - type = LF_ISSET(MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS; + type = LF_ISSET(DB_MUTEX_PROCESS_ONLY) ? USYNC_THREAD : USYNC_PROCESS; ret = mutex_init(&mutexp->mutex, type, NULL); - if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) { + if (ret == 0 && LF_ISSET(DB_MUTEX_SELF_BLOCK)) { ret = cond_init(&mutexp->cond, type, NULL); - F_SET(mutexp, MUTEX_SELF_BLOCK); + F_SET(mutexp, DB_MUTEX_SELF_BLOCK); }} #endif -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - mutexp->reg_off = INVALID_ROFF; -#endif - if (ret == 0) - F_SET(mutexp, MUTEX_INITED); - else - __db_err(dbenv, - "unable to initialize mutex: %s", strerror(ret)); - + if (ret != 0) { + __db_err(dbenv, ret, "unable to initialize mutex"); + } return (ret); } /* * __db_pthread_mutex_lock - * Lock on a mutex, logically blocking if necessary. + * Lock on a mutex, blocking if necessary. 
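The attribute handling above makes the pthread mutex and condition variable usable across processes unless the mutex is process-only. A standalone sketch of that setup, assuming the platform supports PTHREAD_PROCESS_SHARED:

#include <pthread.h>

int
shared_pair_init(pthread_mutex_t *mtx, pthread_cond_t *cond)
{
	pthread_mutexattr_t ma;
	pthread_condattr_t ca;
	int ret;

	/* Mutex: mark it shareable between processes, then initialize. */
	if ((ret = pthread_mutexattr_init(&ma)) != 0)
		return (ret);
	if ((ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED)) == 0)
		ret = pthread_mutex_init(mtx, &ma);
	(void)pthread_mutexattr_destroy(&ma);
	if (ret != 0)
		return (ret);

	/* Condition variable: same treatment. */
	if ((ret = pthread_condattr_init(&ca)) != 0)
		return (ret);
	if ((ret = pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED)) == 0)
		ret = pthread_cond_init(cond, &ca);
	(void)pthread_condattr_destroy(&ca);
	return (ret);
}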
* - * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, db_mutex_t)); */ int -__db_pthread_mutex_lock(dbenv, mutexp) +__db_pthread_mutex_lock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { - u_int32_t nspins; - int i, ret, waited; + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + int i, ret; - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); - /* Attempt to acquire the resource for N spins. */ - for (nspins = dbenv->tas_spins; nspins > 0; --nspins) - if (pthread_mutex_trylock(&mutexp->mutex) == 0) - break; + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + +#ifdef HAVE_STATISTICS + /* + * We want to know which mutexes are contentious, but don't want to + * do an interlocked test here -- that's slower when the underlying + * system has adaptive mutexes and can perform optimizations like + * spinning only if the thread holding the mutex is actually running + * on a CPU. Make a guess, using a normal load instruction. + */ + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) + ++mutexp->mutex_set_wait; + else + ++mutexp->mutex_set_nowait; +#endif - if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0) + RET_SET((pthread_mutex_lock(&mutexp->mutex)), ret); + if (ret != 0) goto err; - if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) { - for (waited = 0; mutexp->locked != 0; waited = 1) { - ret = pthread_cond_wait(&mutexp->cond, &mutexp->mutex); + if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) { + while (F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + RET_SET((pthread_cond_wait( + &mutexp->cond, &mutexp->mutex)), ret); /* * !!! 
* Solaris bug workaround: @@ -241,20 +219,14 @@ __db_pthread_mutex_lock(dbenv, mutexp) #endif ret != ETIMEDOUT) { (void)pthread_mutex_unlock(&mutexp->mutex); - return (ret); + goto err; } } - if (waited) - ++mutexp->mutex_set_wait; - else - ++mutexp->mutex_set_nowait; + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); -#ifdef DIAGNOSTIC - mutexp->locked = (u_int32_t)pthread_self(); -#else - mutexp->locked = 1; -#endif /* * According to HP-UX engineers contacted by Netscape, * pthread_mutex_unlock() will occasionally return EFAULT @@ -266,101 +238,130 @@ __db_pthread_mutex_lock(dbenv, mutexp) */ i = PTHREAD_UNLOCK_ATTEMPTS; do { - ret = pthread_mutex_unlock(&mutexp->mutex); + RET_SET((pthread_mutex_unlock(&mutexp->mutex)), ret); } while (ret == EFAULT && --i > 0); if (ret != 0) goto err; } else { - if (nspins == dbenv->tas_spins) - ++mutexp->mutex_set_nowait; - else if (nspins > 0) { - ++mutexp->mutex_set_spin; - mutexp->mutex_set_spins += dbenv->tas_spins - nspins; - } else - ++mutexp->mutex_set_wait; #ifdef DIAGNOSTIC - if (mutexp->locked) { - char msgbuf[128]; - (void)snprintf(msgbuf, - sizeof(msgbuf), MSG1, (u_long)mutexp->locked); - (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf)); + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + char buf[DB_THREADID_STRLEN]; + (void)dbenv->thread_id_string(dbenv, + mutexp->pid, mutexp->tid, buf); + __db_errx(dbenv, + "pthread lock failed: lock currently in use: pid/tid: %s", + buf); + ret = EINVAL; + goto err; } - mutexp->locked = (u_int32_t)pthread_self(); -#else - mutexp->locked = 1; #endif + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); } + +#ifdef DIAGNOSTIC + /* + * We want to switch threads as often as possible. Yield every time + * we get a mutex to ensure contention. + */ + if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) + __os_yield(dbenv); +#endif return (0); -err: __db_err(dbenv, "unable to lock mutex: %s", strerror(ret)); - return (ret); +err: __db_err(dbenv, ret, "pthread lock failed"); + return (__db_panic(dbenv, ret)); } /* * __db_pthread_mutex_unlock -- - * Release a lock. + * Release a mutex. 
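The DB_MUTEX_SELF_BLOCK path above implements a lockable flag on top of a pthread mutex and condition variable, so a thread can block on a "mutex" whose normal state is locked. A simplified sketch of that pattern (without the Solaris and HP-UX workarounds):

#include <pthread.h>

struct self_block {
	pthread_mutex_t mtx;
	pthread_cond_t cond;
	int locked;
};

int
self_block_lock(struct self_block *sb)
{
	int ret;

	if ((ret = pthread_mutex_lock(&sb->mtx)) != 0)
		return (ret);
	while (sb->locked)                      /* wait for the holder */
		if ((ret = pthread_cond_wait(&sb->cond, &sb->mtx)) != 0)
			break;
	if (ret == 0)
		sb->locked = 1;                 /* take the logical lock */
	(void)pthread_mutex_unlock(&sb->mtx);
	return (ret);
}

int
self_block_unlock(struct self_block *sb)
{
	int ret;

	if ((ret = pthread_mutex_lock(&sb->mtx)) != 0)
		return (ret);
	sb->locked = 0;
	(void)pthread_cond_signal(&sb->cond);   /* wake one waiter */
	return (pthread_mutex_unlock(&sb->mtx));
}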
* - * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, db_mutex_t)); */ int -__db_pthread_mutex_unlock(dbenv, mutexp) +__db_pthread_mutex_unlock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; int i, ret; - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + #ifdef DIAGNOSTIC - if (!mutexp->locked) - (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1); + if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + __db_errx( + dbenv, "pthread unlock failed: lock already unlocked"); + return (__db_panic(dbenv, EACCES)); + } #endif - - if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) { - if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0) + if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) { + RET_SET((pthread_mutex_lock(&mutexp->mutex)), ret); + if (ret != 0) goto err; - mutexp->locked = 0; - - if ((ret = pthread_cond_signal(&mutexp->cond)) != 0) - return (ret); + F_CLR(mutexp, DB_MUTEX_LOCKED); + RET_SET((pthread_cond_signal(&mutexp->cond)), ret); + if (ret != 0) + goto err; } else - mutexp->locked = 0; + F_CLR(mutexp, DB_MUTEX_LOCKED); /* See comment above; workaround for [#2471]. */ i = PTHREAD_UNLOCK_ATTEMPTS; do { - ret = pthread_mutex_unlock(&mutexp->mutex); + RET_SET((pthread_mutex_unlock(&mutexp->mutex)), ret); } while (ret == EFAULT && --i > 0); - return (ret); -err: __db_err(dbenv, "unable to unlock mutex: %s", strerror(ret)); +err: if (ret != 0) { + __db_err(dbenv, ret, "pthread unlock failed"); + return (__db_panic(dbenv, ret)); + } return (ret); } /* * __db_pthread_mutex_destroy -- - * Destroy a DB_MUTEX. + * Destroy a mutex. * - * PUBLIC: int __db_pthread_mutex_destroy __P((DB_MUTEX *)); + * PUBLIC: int __db_pthread_mutex_destroy __P((DB_ENV *, db_mutex_t)); */ int -__db_pthread_mutex_destroy(mutexp) - DB_MUTEX *mutexp; +__db_pthread_mutex_destroy(dbenv, mutex) + DB_ENV *dbenv; + db_mutex_t mutex; { + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; int ret, t_ret; - if (F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + ret = 0; - if (F_ISSET(mutexp, MUTEX_SELF_BLOCK) && - (ret = pthread_cond_destroy(&mutexp->cond)) != 0) - __db_err(NULL, "unable to destroy cond: %s", strerror(ret)); - if ((t_ret = pthread_mutex_destroy(&mutexp->mutex)) != 0) { - __db_err(NULL, "unable to destroy mutex: %s", strerror(t_ret)); + if (F_ISSET(mutexp, DB_MUTEX_SELF_BLOCK)) { + RET_SET((pthread_cond_destroy(&mutexp->cond)), ret); + if (ret != 0) + __db_err(dbenv, ret, "unable to destroy cond"); + } + RET_SET((pthread_mutex_destroy(&mutexp->mutex)), t_ret); + if (t_ret != 0) { + __db_err(dbenv, t_ret, "unable to destroy mutex"); if (ret == 0) ret = t_ret; } diff --git a/db/mutex/mut_region.c b/db/mutex/mut_region.c new file mode 100644 index 000000000..6e1b4f3ad --- /dev/null +++ b/db/mutex/mut_region.c @@ -0,0 +1,356 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2006 + * Oracle Corporation. All rights reserved. 
+ * + * $Id: mut_region.c,v 12.18 2006/08/24 14:46:16 bostic Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" +#include "dbinc/log.h" +#include "dbinc/lock.h" +#include "dbinc/mp.h" +#include "dbinc/mutex_int.h" + +static int __mutex_region_init __P((DB_ENV *, DB_MUTEXMGR *)); +static size_t __mutex_region_size __P((DB_ENV *)); + +/* + * __mutex_open -- + * Open a mutex region. + * + * PUBLIC: int __mutex_open __P((DB_ENV *)); + */ +int +__mutex_open(dbenv) + DB_ENV *dbenv; +{ + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + db_mutex_t mutex; + u_int i; + int ret; + + /* + * Initialize the DB_ENV handle information if not already initialized. + * + * Align mutexes on the byte boundaries specified by the application. + */ + if (dbenv->mutex_align == 0) + dbenv->mutex_align = MUTEX_ALIGN; + if (dbenv->mutex_tas_spins == 0 && + ((ret = __mutex_set_tas_spins(dbenv, __os_spin(dbenv))) != 0)) + return (ret); + + /* + * If the user didn't set an absolute value on the number of mutexes + * we'll need, figure it out. We're conservative in our allocation, + * we need mutexes for DB handles, group-commit queues and other things + * applications allocate at run-time. The application may have kicked + * up our count to allocate its own mutexes, add that in. + */ + if (dbenv->mutex_cnt == 0) + dbenv->mutex_cnt = + __lock_region_mutex_count(dbenv) + + __log_region_mutex_count(dbenv) + + __memp_region_mutex_count(dbenv) + + dbenv->mutex_inc + 100; + + /* Create/initialize the mutex manager structure. */ + if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MUTEXMGR), &mtxmgr)) != 0) + return (ret); + + /* Join/create the txn region. */ + mtxmgr->reginfo.dbenv = dbenv; + mtxmgr->reginfo.type = REGION_TYPE_MUTEX; + mtxmgr->reginfo.id = INVALID_REGION_ID; + mtxmgr->reginfo.flags = REGION_JOIN_OK; + if (F_ISSET(dbenv, DB_ENV_CREATE)) + F_SET(&mtxmgr->reginfo, REGION_CREATE_OK); + if ((ret = __db_r_attach(dbenv, + &mtxmgr->reginfo, __mutex_region_size(dbenv))) != 0) + goto err; + + /* If we created the region, initialize it. */ + if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE)) + if ((ret = __mutex_region_init(dbenv, mtxmgr)) != 0) + goto err; + + /* Set the local addresses. */ + mtxregion = mtxmgr->reginfo.primary = + R_ADDR(&mtxmgr->reginfo, mtxmgr->reginfo.rp->primary); + mtxmgr->mutex_array = R_ADDR(&mtxmgr->reginfo, mtxregion->mutex_offset); + + dbenv->mutex_handle = mtxmgr; + + /* Allocate initial queue of mutexes. */ + if (dbenv->mutex_iq != NULL) { + DB_ASSERT(dbenv, F_ISSET(&mtxmgr->reginfo, REGION_CREATE)); + for (i = 0; i < dbenv->mutex_iq_next; ++i) { + if ((ret = __mutex_alloc_int( + dbenv, 0, dbenv->mutex_iq[i].alloc_id, + dbenv->mutex_iq[i].flags, &mutex)) != 0) + goto err; + /* + * Confirm we allocated the right index, correcting + * for avoiding slot 0 (MUTEX_INVALID). + */ + DB_ASSERT(dbenv, mutex == i + 1); + } + __os_free(dbenv, dbenv->mutex_iq); + dbenv->mutex_iq = NULL; + + /* + * This is the first place we can test mutexes and we need to + * know if they're working. (They CAN fail, for example on + * SunOS, when using fcntl(2) for locking and using an + * in-memory filesystem as the database environment directory. + * But you knew that, I'm sure -- it probably wasn't worth + * mentioning.) 
+ */ + mutex = MUTEX_INVALID; + if ((ret = + __mutex_alloc(dbenv, MTX_MUTEX_TEST, 0, &mutex) != 0) || + (ret = __mutex_lock(dbenv, mutex)) != 0 || + (ret = __mutex_unlock(dbenv, mutex)) != 0 || + (ret = __mutex_free(dbenv, &mutex)) != 0) { + __db_errx(dbenv, + "Unable to acquire/release a mutex; check configuration"); + goto err; + } + } + + /* + * Initialize thread tracking. We want to do this as early + * as possible in case we die. This sits in the mutex region + * so do it now. + */ + if ((ret = __env_thread_init(dbenv, + F_ISSET(&mtxmgr->reginfo, REGION_CREATE))) != 0) + goto err; + + return (0); + +err: dbenv->mutex_handle = NULL; + if (mtxmgr->reginfo.addr != NULL) + (void)__db_r_detach(dbenv, &mtxmgr->reginfo, 0); + + __os_free(dbenv, mtxmgr); + return (ret); +} + +/* + * __mutex_region_init -- + * Initialize a mutex region in shared memory. + */ +static int +__mutex_region_init(dbenv, mtxmgr) + DB_ENV *dbenv; + DB_MUTEXMGR *mtxmgr; +{ + DB_MUTEXREGION *mtxregion; + DB_MUTEX *mutexp; + db_mutex_t i; + int ret; + void *mutex_array; + + COMPQUIET(mutexp, NULL); + + if ((ret = __db_shalloc(&mtxmgr->reginfo, + sizeof(DB_MUTEXREGION), 0, &mtxmgr->reginfo.primary)) != 0) { + __db_errx(dbenv, + "Unable to allocate memory for the mutex region"); + return (ret); + } + mtxmgr->reginfo.rp->primary = + R_OFFSET(&mtxmgr->reginfo, mtxmgr->reginfo.primary); + mtxregion = mtxmgr->reginfo.primary; + memset(mtxregion, 0, sizeof(*mtxregion)); + + if ((ret = __mutex_alloc( + dbenv, MTX_MUTEX_REGION, 0, &mtxregion->mtx_region)) != 0) + return (ret); + + mtxregion->mutex_size = + (size_t)DB_ALIGN(sizeof(DB_MUTEX), dbenv->mutex_align); + + mtxregion->stat.st_mutex_align = dbenv->mutex_align; + mtxregion->stat.st_mutex_cnt = dbenv->mutex_cnt; + mtxregion->stat.st_mutex_tas_spins = dbenv->mutex_tas_spins; + + /* + * Get a chunk of memory to be used for the mutexes themselves. Each + * piece of the memory must be properly aligned. + * + * The OOB mutex (MUTEX_INVALID) is 0. To make this work, we ignore + * the first allocated slot when we build the free list. We have to + * correct the count by 1 here, though, otherwise our counter will be + * off by 1. + */ + if ((ret = __db_shalloc(&mtxmgr->reginfo, + (mtxregion->stat.st_mutex_cnt + 1) * mtxregion->mutex_size, + mtxregion->stat.st_mutex_align, &mutex_array)) != 0) { + __db_errx(dbenv, + "Unable to allocate memory for mutexes from the region"); + return (ret); + } + + mtxregion->mutex_offset = R_OFFSET(&mtxmgr->reginfo, mutex_array); + mtxmgr->mutex_array = mutex_array; + + /* + * Put the mutexes on a free list and clear the allocated flag. + * + * The OOB mutex (MUTEX_INVALID) is 0, skip it. + * + * The comparison is <, not <=, because we're looking ahead one + * in each link. + */ + for (i = 1; i < mtxregion->stat.st_mutex_cnt; ++i) { + mutexp = MUTEXP_SET(i); + mutexp->flags = 0; + mutexp->mutex_next_link = i + 1; + } + mutexp = MUTEXP_SET(i); + mutexp->flags = 0; + mutexp->mutex_next_link = MUTEX_INVALID; + mtxregion->mutex_next = 1; + mtxregion->stat.st_mutex_free = mtxregion->stat.st_mutex_cnt; + mtxregion->stat.st_mutex_inuse = mtxregion->stat.st_mutex_inuse_max = 0; + + return (0); +} + +/* + * __mutex_dbenv_refresh -- + * Clean up after the mutex region on a close or failed open. 
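__mutex_region_init above sizes each slot with DB_ALIGN(sizeof(DB_MUTEX), dbenv->mutex_align), and __mutex_set_align earlier rejects alignments that are not a non-zero power of two. Assuming DB_ALIGN is the usual round-up-to-alignment macro, the arithmetic amounts to:

#include <stddef.h>

/* A non-zero power of two has exactly one bit set. */
int
is_power_of_two(size_t n)
{
	return (n != 0 && (n & (n - 1)) == 0);
}

/* Round size up to the next multiple of a power-of-two alignment. */
size_t
align_up(size_t size, size_t alignment)
{
	return ((size + alignment - 1) & ~(alignment - 1));
}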
+ * + * PUBLIC: int __mutex_dbenv_refresh __P((DB_ENV *)); + */ +int +__mutex_dbenv_refresh(dbenv) + DB_ENV *dbenv; +{ + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + REGINFO *reginfo; + int ret; + + mtxmgr = dbenv->mutex_handle; + reginfo = &mtxmgr->reginfo; + mtxregion = mtxmgr->reginfo.primary; + + /* + * If a private region, return the memory to the heap. Not needed for + * filesystem-backed or system shared memory regions, that memory isn't + * owned by any particular process. + */ + if (F_ISSET(dbenv, DB_ENV_PRIVATE)) { +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES + /* + * If destroying the mutex region, return any system resources + * to the system. + */ + __mutex_resource_return(dbenv, reginfo); +#endif + /* Discard the mutex array. */ + __db_shalloc_free( + reginfo, R_ADDR(reginfo, mtxregion->mutex_offset)); + } + + /* Detach from the region. */ + ret = __db_r_detach(dbenv, reginfo, 0); + + __os_free(dbenv, mtxmgr); + + dbenv->mutex_handle = NULL; + + return (ret); +} + +/* + * __mutex_region_size -- + * Return the amount of space needed for the mutex region. + */ +static size_t +__mutex_region_size(dbenv) + DB_ENV *dbenv; +{ + size_t s; + + s = sizeof(DB_MUTEXMGR) + 1024; + s += dbenv->mutex_cnt * + __db_shalloc_size(sizeof(DB_MUTEX), dbenv->mutex_align); + /* + * Allocate space for thread info blocks. Max is only advisory, + * so we allocate 25% more. + */ + s += (dbenv->thr_max + dbenv->thr_max/4) * + __db_shalloc_size(sizeof(DB_THREAD_INFO), sizeof(roff_t)); + s += dbenv->thr_nbucket * + __db_shalloc_size(sizeof(DB_HASHTAB), sizeof(roff_t)); + return (s); +} + +#ifdef HAVE_MUTEX_SYSTEM_RESOURCES +/* + * __mutex_resource_return + * Return any system-allocated mutex resources to the system. + * + * PUBLIC: void __mutex_resource_return __P((DB_ENV *, REGINFO *)); + */ +void +__mutex_resource_return(dbenv, infop) + DB_ENV *dbenv; + REGINFO *infop; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr, mtxmgr_st; + DB_MUTEXREGION *mtxregion; + db_mutex_t i; + void *orig_handle; + + /* + * This routine is called in two cases: when discarding the regions + * from a previous Berkeley DB run, during recovery, and two, when + * discarding regions as we shut down the database environment. + * + * Walk the list of mutexes and destroy any live ones. + * + * This is just like joining a region -- the REGINFO we're handed + * is the same as the one returned by __db_r_attach(), all we have + * to do is fill in the links. + * + * !!! + * The region may be corrupted, of course. We're safe because the + * only things we look at are things that are initialized when the + * region is created, and never modified after that. + */ + memset(&mtxmgr_st, 0, sizeof(mtxmgr_st)); + mtxmgr = &mtxmgr_st; + mtxmgr->reginfo = *infop; + mtxregion = mtxmgr->reginfo.primary = + R_ADDR(&mtxmgr->reginfo, mtxmgr->reginfo.rp->primary); + mtxmgr->mutex_array = R_ADDR(&mtxmgr->reginfo, mtxregion->mutex_offset); + + /* + * This is a little strange, but the mutex_handle is what all of the + * underlying mutex routines will use to determine if they should do + * any work and to find their information. Save/restore the handle + * around the work loop. + * + * The OOB mutex (MUTEX_INVALID) is 0, skip it. 
+ */ + orig_handle = dbenv->mutex_handle; + dbenv->mutex_handle = mtxmgr; + for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) { + mutexp = MUTEXP_SET(i); + if (F_ISSET(mutexp, DB_MUTEX_ALLOCATED)) + (void)__mutex_destroy(dbenv, i); + } + dbenv->mutex_handle = orig_handle; +} +#endif diff --git a/db/mutex/mut_stat.c b/db/mutex/mut_stat.c new file mode 100644 index 000000000..1e642fe2f --- /dev/null +++ b/db/mutex/mut_stat.c @@ -0,0 +1,450 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996-2006 + * Oracle Corporation. All rights reserved. + * + * $Id: mut_stat.c,v 12.17 2006/08/24 14:46:16 bostic Exp $ + */ + +#include "db_config.h" + +#include "db_int.h" +#include "dbinc/db_page.h" +#include "dbinc/db_am.h" +#include "dbinc/mutex_int.h" + +#ifdef HAVE_STATISTICS +static int __mutex_print_all __P((DB_ENV *, u_int32_t)); +static const char *__mutex_print_id __P((int)); +static int __mutex_print_stats __P((DB_ENV *, u_int32_t)); +static void __mutex_print_summary __P((DB_ENV *)); + +/* + * __mutex_stat -- + * DB_ENV->mutex_stat. + * + * PUBLIC: int __mutex_stat __P((DB_ENV *, DB_MUTEX_STAT **, u_int32_t)); + */ +int +__mutex_stat(dbenv, statp, flags) + DB_ENV *dbenv; + DB_MUTEX_STAT **statp; + u_int32_t flags; +{ + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + DB_MUTEX_STAT *stats; + int ret; + + PANIC_CHECK(dbenv); + + if ((ret = __db_fchk(dbenv, + "DB_ENV->mutex_stat", flags, DB_STAT_CLEAR)) != 0) + return (ret); + + *statp = NULL; + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + + if ((ret = __os_umalloc(dbenv, sizeof(DB_MUTEX_STAT), &stats)) != 0) + return (ret); + + MUTEX_SYSTEM_LOCK(dbenv); + + /* + * Most fields are maintained in the underlying region structure. + * Region size and region mutex are not. + */ + *stats = mtxregion->stat; + stats->st_regsize = mtxmgr->reginfo.rp->size; + __mutex_set_wait_info(dbenv, mtxregion->mtx_region, + &stats->st_region_wait, &stats->st_region_nowait); + if (LF_ISSET(DB_STAT_CLEAR)) + __mutex_clear(dbenv, mtxregion->mtx_region); + + MUTEX_SYSTEM_UNLOCK(dbenv); + + *statp = stats; + return (0); +} + +/* + * __mutex_stat_print + * DB_ENV->mutex_stat_print method. 
+ * + * PUBLIC: int __mutex_stat_print __P((DB_ENV *, u_int32_t)); + */ +int +__mutex_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + u_int32_t orig_flags; + int ret; + + PANIC_CHECK(dbenv); + + if ((ret = __db_fchk(dbenv, "DB_ENV->mutex_stat_print", + flags, DB_STAT_ALL | DB_STAT_CLEAR)) != 0) + return (ret); + + orig_flags = flags; + LF_CLR(DB_STAT_CLEAR); + if (flags == 0 || LF_ISSET(DB_STAT_ALL)) { + ret = __mutex_print_stats(dbenv, orig_flags); + __mutex_print_summary(dbenv); + if (flags == 0 || ret != 0) + return (ret); + } + + if (LF_ISSET(DB_STAT_ALL)) + ret = __mutex_print_all(dbenv, orig_flags); + + return (0); +} + +static void +__mutex_print_summary(dbenv) + DB_ENV *dbenv; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + db_mutex_t i; + u_int32_t counts[MTX_MAX_ENTRY + 2]; + int alloc_id; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + memset(counts, 0, sizeof(counts)); + + for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) { + mutexp = MUTEXP_SET(i); + + if (!F_ISSET(mutexp, DB_MUTEX_ALLOCATED)) + counts[0]++; + else if (mutexp->alloc_id > MTX_MAX_ENTRY) + counts[MTX_MAX_ENTRY + 1]++; + else + counts[mutexp->alloc_id]++; + } + __db_msg(dbenv, "Mutex counts"); + __db_msg(dbenv, "%d\tUnallocated", counts[0]); + for (alloc_id = 1; alloc_id <= MTX_TXN_REGION + 1; alloc_id++) + if (counts[alloc_id] != 0) + __db_msg(dbenv, "%lu\t%s", + (u_long)counts[alloc_id], + __mutex_print_id(alloc_id)); + +} + +/* + * __mutex_print_stats -- + * Display default mutex region statistics. + */ +static int +__mutex_print_stats(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + DB_MUTEX_STAT *sp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + REGINFO *infop; + THREAD_INFO *thread; + int ret; + + if ((ret = __mutex_stat(dbenv, &sp, LF_ISSET(DB_STAT_CLEAR))) != 0) + return (ret); + + if (LF_ISSET(DB_STAT_ALL)) + __db_msg(dbenv, "Default mutex region information:"); + + __db_dlbytes(dbenv, "Mutex region size", + (u_long)0, (u_long)0, (u_long)sp->st_regsize); + __db_dl_pct(dbenv, + "The number of region locks that required waiting", + (u_long)sp->st_region_wait, DB_PCT(sp->st_region_wait, + sp->st_region_wait + sp->st_region_nowait), NULL); + STAT_ULONG("Mutex alignment", sp->st_mutex_align); + STAT_ULONG("Mutex test-and-set spins", sp->st_mutex_tas_spins); + STAT_ULONG("Mutex total count", sp->st_mutex_cnt); + STAT_ULONG("Mutex free count", sp->st_mutex_free); + STAT_ULONG("Mutex in-use count", sp->st_mutex_inuse); + STAT_ULONG("Mutex maximum in-use count", sp->st_mutex_inuse_max); + + __os_ufree(dbenv, sp); + + /* + * Dump out the info we have on thread tracking, we do it here only + * because we share the region. + */ + if (dbenv->thr_hashtab != NULL) { + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + infop = &mtxmgr->reginfo; + thread = R_ADDR(infop, mtxregion->thread_off); + STAT_ULONG("Thread blocks allocated", thread->thr_count); + STAT_ULONG("Thread allocation threshold", thread->thr_max); + STAT_ULONG("Thread hash buckets", thread->thr_nbucket); + } + + return (0); +} + +/* + * __mutex_print_all -- + * Display debugging mutex region statistics. 
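The statistics code above reports, via DB_PCT, the share of acquisitions that had to wait. Assuming DB_PCT is a simple integer-percentage helper, the computation is:

/* Percentage of requests that waited, guarding against a zero total. */
int
wait_pct(unsigned long waited, unsigned long total)
{
	return (total == 0 ? 0 : (int)((waited * 100) / total));
}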
+ */ +static int +__mutex_print_all(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + static const FN fn[] = { + { DB_MUTEX_ALLOCATED, "alloc" }, + { DB_MUTEX_LOCKED, "locked" }, + { DB_MUTEX_LOGICAL_LOCK, "logical" }, + { DB_MUTEX_PROCESS_ONLY, "process-private" }, + { DB_MUTEX_SELF_BLOCK, "self-block" }, + { 0, NULL } + }; + DB_MSGBUF mb, *mbp; + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + db_mutex_t i; + + DB_MSGBUF_INIT(&mb); + mbp = &mb; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + + __db_print_reginfo(dbenv, &mtxmgr->reginfo, "Mutex"); + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + + __db_msg(dbenv, "DB_MUTEXREGION structure:"); + __mutex_print_debug_single(dbenv, + "DB_MUTEXREGION region mutex", mtxregion->mtx_region, flags); + STAT_ULONG("Size of the aligned mutex", mtxregion->mutex_size); + STAT_ULONG("Next free mutex", mtxregion->mutex_next); + + /* + * The OOB mutex (MUTEX_INVALID) is 0, skip it. + * + * We're not holding the mutex region lock, so we're racing threads of + * control allocating mutexes. That's OK, it just means we display or + * clear statistics while mutexes are moving. + */ + __db_msg(dbenv, "%s", DB_GLOBAL(db_line)); + __db_msg(dbenv, "mutex\twait/nowait, pct wait, holder, flags"); + for (i = 1; i <= mtxregion->stat.st_mutex_cnt; ++i, ++mutexp) { + mutexp = MUTEXP_SET(i); + + if (!F_ISSET(mutexp, DB_MUTEX_ALLOCATED)) + continue; + + __db_msgadd(dbenv, mbp, "%5lu\t", (u_long)i); + + __mutex_print_debug_stats(dbenv, mbp, i, flags); + + if (mutexp->alloc_id != 0) + __db_msgadd(dbenv, + mbp, ", %s", __mutex_print_id(mutexp->alloc_id)); + + __db_prflags(dbenv, mbp, mutexp->flags, fn, " (", ")"); + + DB_MSGBUF_FLUSH(dbenv, mbp); + } + + return (0); +} + +/* + * __mutex_print_debug_single -- + * Print mutex internal debugging statistics for a single mutex on a + * single output line. + * + * PUBLIC: void __mutex_print_debug_single + * PUBLIC: __P((DB_ENV *, const char *, db_mutex_t, u_int32_t)); + */ +void +__mutex_print_debug_single(dbenv, tag, mutex, flags) + DB_ENV *dbenv; + const char *tag; + db_mutex_t mutex; + u_int32_t flags; +{ + DB_MSGBUF mb, *mbp; + + DB_MSGBUF_INIT(&mb); + mbp = &mb; + + __db_msgadd(dbenv, mbp, "%lu\t%s ", (u_long)mutex, tag); + __mutex_print_debug_stats(dbenv, mbp, mutex, flags); + DB_MSGBUF_FLUSH(dbenv, mbp); +} + +/* + * __mutex_print_debug_stats -- + * Print mutex internal debugging statistics, that is, the statistics + * in the [] square brackets. 
+ * + * PUBLIC: void __mutex_print_debug_stats + * PUBLIC: __P((DB_ENV *, DB_MSGBUF *, db_mutex_t, u_int32_t)); + */ +void +__mutex_print_debug_stats(dbenv, mbp, mutex, flags) + DB_ENV *dbenv; + DB_MSGBUF *mbp; + db_mutex_t mutex; + u_int32_t flags; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + u_long value; + char buf[DB_THREADID_STRLEN]; + + if (mutex == MUTEX_INVALID) { + __db_msgadd(dbenv, mbp, "[!Set]"); + return; + } + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + + __db_msgadd(dbenv, mbp, "["); + if ((value = mutexp->mutex_set_wait) < 10000000) + __db_msgadd(dbenv, mbp, "%lu", value); + else + __db_msgadd(dbenv, mbp, "%luM", value / 1000000); + if ((value = mutexp->mutex_set_nowait) < 10000000) + __db_msgadd(dbenv, mbp, "/%lu", value); + else + __db_msgadd(dbenv, mbp, "/%luM", value / 1000000); + + __db_msgadd(dbenv, mbp, " %d%%", + DB_PCT(mutexp->mutex_set_wait, + mutexp->mutex_set_wait + mutexp->mutex_set_nowait)); + + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) + __db_msgadd(dbenv, mbp, " %s]", + dbenv->thread_id_string(dbenv, + mutexp->pid, mutexp->tid, buf)); + else + __db_msgadd(dbenv, mbp, " !Own]"); + + if (LF_ISSET(DB_STAT_CLEAR)) + __mutex_clear(dbenv, mutex); +} + +static const char * +__mutex_print_id(alloc_id) + int alloc_id; +{ + switch (alloc_id) { + case MTX_APPLICATION: return ("application allocated"); + case MTX_DB_HANDLE: return ("db handle"); + case MTX_ENV_DBLIST: return ("env dblist"); + case MTX_ENV_REGION: return ("env region"); + case MTX_LOCK_REGION: return ("lock region"); + case MTX_LOGICAL_LOCK: return ("logical lock"); + case MTX_LOG_FILENAME: return ("log filename"); + case MTX_LOG_FLUSH: return ("log flush"); + case MTX_LOG_HANDLE: return ("log handle"); + case MTX_LOG_REGION: return ("log region"); + case MTX_MPOOLFILE_HANDLE: return ("mpoolfile handle"); + case MTX_MPOOL_FH: return ("mpool filehandle"); + case MTX_MPOOL_FILE_BUCKET: return ("mpool file bucket"); + case MTX_MPOOL_HANDLE: return ("mpool handle"); + case MTX_MPOOL_HASH_BUCKET: return ("mpool hash bucket"); + case MTX_MPOOL_IO: return ("mpool buffer I/O"); + case MTX_MPOOL_REGION: return ("mpool region"); + case MTX_REP_DATABASE: return ("replication database"); + case MTX_REP_REGION: return ("replication region"); + case MTX_SEQUENCE: return ("sequence"); + case MTX_TWISTER: return ("twister"); + case MTX_TXN_ACTIVE: return ("txn active list"); + case MTX_TXN_COMMIT: return ("txn commit"); + case MTX_TXN_MVCC: return ("txn mvcc"); + case MTX_TXN_REGION: return ("txn region"); + default: return ("unknown mutex type"); + } + /* NOTREACHED */ +} + +/* + * __mutex_set_wait_info -- + * Return mutex statistics. + * + * PUBLIC: void __mutex_set_wait_info + * PUBLIC: __P((DB_ENV *, db_mutex_t, u_int32_t *, u_int32_t *)); + */ +void +__mutex_set_wait_info(dbenv, mutex, waitp, nowaitp) + DB_ENV *dbenv; + db_mutex_t mutex; + u_int32_t *waitp, *nowaitp; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + + *waitp = mutexp->mutex_set_wait; + *nowaitp = mutexp->mutex_set_nowait; +} + +/* + * __mutex_clear -- + * Clear mutex statistics. 
+ * + * PUBLIC: void __mutex_clear __P((DB_ENV *, db_mutex_t)); + */ +void +__mutex_clear(dbenv, mutex) + DB_ENV *dbenv; + db_mutex_t mutex; +{ + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + + mutexp->mutex_set_wait = mutexp->mutex_set_nowait = 0; +} + +#else /* !HAVE_STATISTICS */ + +int +__mutex_stat(dbenv, statp, flags) + DB_ENV *dbenv; + DB_MUTEX_STAT **statp; + u_int32_t flags; +{ + COMPQUIET(statp, NULL); + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} + +int +__mutex_stat_print(dbenv, flags) + DB_ENV *dbenv; + u_int32_t flags; +{ + COMPQUIET(flags, 0); + + return (__db_stat_not_built(dbenv)); +} +#endif diff --git a/db/mutex/mut_tas.c b/db/mutex/mut_tas.c index 08d7ed876..bbe25fa8e 100644 --- a/db/mutex/mut_tas.c +++ b/db/mutex/mut_tas.c @@ -1,132 +1,113 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996-2004 - * Sleepycat Software. All rights reserved. + * Copyright (c) 1996-2006 + * Oracle Corporation. All rights reserved. * - * $Id: mut_tas.c,v 11.44 2004/09/15 19:14:49 bostic Exp $ + * $Id: mut_tas.c,v 12.20 2006/08/24 14:46:16 bostic Exp $ */ #include "db_config.h" -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <stdlib.h> -#include <string.h> -#endif +#include "db_int.h" /* * This is where we load in the actual test-and-set mutex code. */ #define LOAD_ACTUAL_MUTEX_CODE -#include "db_int.h" +#include "dbinc/mutex_int.h" /* * __db_tas_mutex_init -- - * Initialize a DB_MUTEX. + * Initialize a test-and-set mutex. * - * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t)); + * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t)); */ int -__db_tas_mutex_init(dbenv, mutexp, flags) +__db_tas_mutex_init(dbenv, mutex, flags) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; u_int32_t flags; { - u_int32_t save; + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + int ret; + + COMPQUIET(flags, 0); + + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); /* Check alignment. */ - if ((uintptr_t)mutexp & (MUTEX_ALIGN - 1)) { - __db_err(dbenv, - "__db_tas_mutex_init: mutex not appropriately aligned"); + if (((uintptr_t)mutexp & (dbenv->mutex_align - 1)) != 0) { + __db_errx(dbenv, "TAS: mutex not appropriately aligned"); return (EINVAL); } - /* - * The only setting/checking of the MUTEX_MPOOL flag is in the mutex - * mutex allocation code (__db_mutex_alloc/free). Preserve only that - * flag. This is safe because even if this flag was never explicitly - * set, but happened to be set in memory, it will never be checked or - * acted upon. - */ - save = F_ISSET(mutexp, MUTEX_MPOOL); - memset(mutexp, 0, sizeof(*mutexp)); - F_SET(mutexp, save); - - /* - * If this is a thread lock or the process has told us that there are - * no other processes in the environment, use thread-only locks, they - * are faster in some cases. - * - * This is where we decide to ignore locks we don't need to set -- if - * the application isn't threaded, there aren't any threads to block. 
- */ - if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) { - if (!F_ISSET(dbenv, DB_ENV_THREAD)) { - F_SET(mutexp, MUTEX_IGNORE); - return (0); - } + if (MUTEX_INIT(&mutexp->tas)) { + ret = __os_get_syserr(); + __db_syserr(dbenv, ret, "TAS: mutex initialize"); + return (__os_posix_err(ret)); } - - if (LF_ISSET(MUTEX_LOGICAL_LOCK)) - F_SET(mutexp, MUTEX_LOGICAL_LOCK); - - /* Initialize the lock. */ - if (MUTEX_INIT(&mutexp->tas)) - return (__os_get_errno()); - -#ifdef HAVE_MUTEX_SYSTEM_RESOURCES - mutexp->reg_off = INVALID_ROFF; -#endif - F_SET(mutexp, MUTEX_INITED); - return (0); } /* * __db_tas_mutex_lock - * Lock on a mutex, logically blocking if necessary. + * Lock on a mutex, blocking if necessary. * - * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, db_mutex_t)); */ int -__db_tas_mutex_lock(dbenv, mutexp) +__db_tas_mutex_lock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; u_int32_t nspins; u_long ms, max_ms; - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + +#ifdef HAVE_STATISTICS + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) + ++mutexp->mutex_set_wait; + else + ++mutexp->mutex_set_nowait; +#endif + /* * Wait 1ms initially, up to 10ms for mutexes backing logical database * locks, and up to 25 ms for mutual exclusion data structure mutexes. * SR: #7675 */ ms = 1; - max_ms = F_ISSET(mutexp, MUTEX_LOGICAL_LOCK) ? 10 : 25; + max_ms = F_ISSET(mutexp, DB_MUTEX_LOGICAL_LOCK) ? 10 : 25; loop: /* Attempt to acquire the resource for N spins. */ - for (nspins = dbenv->tas_spins; nspins > 0; --nspins) { + for (nspins = + mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) { #ifdef HAVE_MUTEX_HPPA_MSEM_INIT relock: #endif #ifdef HAVE_MUTEX_S390_CC_ASSEMBLY tsl_t zero = 0; #endif - if ( -#ifdef MUTEX_SET_TEST /* - * If using test-and-set mutexes, and we know the "set" value, - * we can avoid interlocked instructions since they're unlikely - * to succeed. + * Avoid interlocked instructions until they're likely to + * succeed. */ - mutexp->tas || -#endif - !MUTEX_SET(&mutexp->tas)) { + if (F_ISSET(mutexp, DB_MUTEX_LOCKED) || + !MUTEX_SET(&mutexp->tas)) { /* * Some systems (notably those with newer Intel CPUs) * need a small pause here. [#6975] @@ -141,71 +122,95 @@ relock: /* * HP semaphores are unlocked automatically when a holding * process exits. If the mutex appears to be locked - * (mutexp->locked != 0) but we got here, assume this has - * happened. Stick our own pid into mutexp->locked and + * (F_ISSET(DB_MUTEX_LOCKED)) but we got here, assume this + * has happened. Set the pid and tid into the mutex and * lock again. (The default state of the mutexes used to * block in __lock_get_internal is locked, so exiting with * a locked mutex is reasonable behavior for a process that * happened to initialize or use one of them.) */ - if (mutexp->locked != 0) { - __os_id(&mutexp->locked); + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); goto relock; } /* - * If we make it here, locked == 0, the diagnostic won't fire, - * and we were really unlocked by someone calling the - * DB mutex unlock function. 
+ * If we make it here, the mutex isn't locked, the diagnostic + * won't fire, and we were really unlocked by someone calling + * the DB mutex unlock function. */ #endif #ifdef DIAGNOSTIC - if (mutexp->locked != 0) - __db_err(dbenv, - "__db_tas_mutex_lock: ERROR: lock currently in use: ID: %lu", - (u_long)mutexp->locked); + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + char buf[DB_THREADID_STRLEN]; + __db_errx(dbenv, + "TAS lock failed: lock currently in use: ID: %s", + dbenv->thread_id_string(dbenv, + mutexp->pid, mutexp->tid, buf)); + return (__db_panic(dbenv, EACCES)); + } #endif -#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT) - __os_id(&mutexp->locked); + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); + +#ifdef DIAGNOSTIC + /* + * We want to switch threads as often as possible. Yield + * every time we get a mutex to ensure contention. + */ + if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) + __os_yield(dbenv); #endif - if (ms == 1) - ++mutexp->mutex_set_nowait; - else - ++mutexp->mutex_set_wait; return (0); } - /* - * Yield the processor. - */ - __os_yield(NULL, ms * USEC_PER_MS); + /* Wait for the lock to become available. */ + __os_sleep(dbenv, 0, ms * USEC_PER_MS); if ((ms <<= 1) > max_ms) ms = max_ms; + /* + * We're spinning. The environment might be hung, and somebody else + * has already recovered it. The first thing recovery does is panic + * the environment. Check to see if we're never going to get this + * mutex. + */ + PANIC_CHECK(dbenv); + goto loop; } /* * __db_tas_mutex_unlock -- - * Release a lock. + * Release a mutex. * - * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, db_mutex_t)); */ int -__db_tas_mutex_unlock(dbenv, mutexp) +__db_tas_mutex_unlock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + #ifdef DIAGNOSTIC - if (!mutexp->locked) - __db_err(dbenv, - "__db_tas_mutex_unlock: ERROR: lock already unlocked"); -#endif -#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT) - mutexp->locked = 0; + if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + __db_errx(dbenv, "TAS unlock failed: lock already unlocked"); + return (__db_panic(dbenv, EACCES)); + } #endif + F_CLR(mutexp, DB_MUTEX_LOCKED); MUTEX_UNSET(&mutexp->tas); @@ -214,17 +219,26 @@ __db_tas_mutex_unlock(dbenv, mutexp) /* * __db_tas_mutex_destroy -- - * Destroy a DB_MUTEX. + * Destroy a mutex. 
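An aside, not part of the patch: the acquisition loop above is a test-and-test-and-set spin with bounded exponential backoff (1ms, 2ms, 4ms, ... capped at 10ms or 25ms as the comment above notes). The same idea in portable C11 atomics, for readers who don't want to chase the MUTEX_SET/MUTEX_UNSET macros; names and parameters here are illustrative only:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <unistd.h>

	static void
	lock_with_backoff(atomic_bool *lock, unsigned spins, unsigned max_ms)
	{
		unsigned i, ms;

		for (ms = 1;;) {
			for (i = 0; i < spins; ++i) {
				/* Peek first: skip the interlocked op while
				 * the lock is visibly held. */
				if (atomic_load_explicit(lock,
				    memory_order_relaxed))
					continue;
				if (!atomic_exchange_explicit(lock, true,
				    memory_order_acquire))
					return;		/* Acquired. */
			}
			usleep(ms * 1000);	/* Back off 1ms, 2ms, 4ms... */
			if ((ms <<= 1) > max_ms)
				ms = max_ms;
		}
	}

	/* Release is a plain store with release ordering:
	 * atomic_store_explicit(lock, false, memory_order_release); */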
* - * PUBLIC: int __db_tas_mutex_destroy __P((DB_MUTEX *)); + * PUBLIC: int __db_tas_mutex_destroy __P((DB_ENV *, db_mutex_t)); */ int -__db_tas_mutex_destroy(mutexp) - DB_MUTEX *mutexp; +__db_tas_mutex_destroy(dbenv, mutex) + DB_ENV *dbenv; + db_mutex_t mutex; { - if (F_ISSET(mutexp, MUTEX_IGNORE)) + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; + + if (!MUTEX_ON(dbenv)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + MUTEX_DESTROY(&mutexp->tas); return (0); diff --git a/db/mutex/mut_win32.c b/db/mutex/mut_win32.c index b51000321..083784310 100644 --- a/db/mutex/mut_win32.c +++ b/db/mutex/mut_win32.c @@ -1,113 +1,136 @@ /* * See the file LICENSE for redistribution information. * - * Copyright (c) 2002-2004 - * Sleepycat Software. All rights reserved. + * Copyright (c) 2002-2006 + * Oracle Corporation. All rights reserved. * - * $Id: mut_win32.c,v 1.18 2004/07/06 21:06:39 mjc Exp $ + * $Id: mut_win32.c,v 12.21 2006/08/24 14:46:16 bostic Exp $ */ #include "db_config.h" -#ifndef NO_SYSTEM_INCLUDES -#include <sys/types.h> - -#include <string.h> -#endif +#include "db_int.h" /* * This is where we load in the actual test-and-set mutex code. */ #define LOAD_ACTUAL_MUTEX_CODE -#include "db_int.h" +#include "dbinc/mutex_int.h" /* We don't want to run this code even in "ordinary" diagnostic mode. */ #undef MUTEX_DIAG +/* + * Common code to get an event handle. This is executed whenever a mutex + * blocks, or when unlocking a mutex that a thread is waiting on. We can't + * keep these handles around, since the mutex structure is in shared memory, + * and each process gets its own handle value. + * + * We pass security attributes so that the created event is accessible by all + * users, in case a Windows service is sharing an environment with a local + * process run as a different user. + */ static _TCHAR hex_digits[] = _T("0123456789abcdef"); +static SECURITY_DESCRIPTOR null_sd; +static SECURITY_ATTRIBUTES all_sa; +static int security_initialized = 0; + +static __inline int get_handle(dbenv, mutexp, eventp) + DB_ENV *dbenv; + DB_MUTEX *mutexp; + HANDLE *eventp; +{ + _TCHAR idbuf[] = _T("db.m00000000"); + _TCHAR *p = idbuf + 12; + int ret = 0; + u_int32_t id; + + for (id = (mutexp)->id; id != 0; id >>= 4) + *--p = hex_digits[id & 0xf]; + + if (!security_initialized) { + InitializeSecurityDescriptor(&null_sd, + SECURITY_DESCRIPTOR_REVISION); + SetSecurityDescriptorDacl(&null_sd, TRUE, 0, FALSE); + all_sa.nLength = sizeof(SECURITY_ATTRIBUTES); + all_sa.bInheritHandle = FALSE; + all_sa.lpSecurityDescriptor = &null_sd; + security_initialized = 1; + } + + if ((*eventp = CreateEvent(&all_sa, FALSE, FALSE, idbuf)) == NULL) { + ret = __os_get_syserr(); + __db_syserr(dbenv, ret, "Win32 create event failed"); + } -#define GET_HANDLE(mutexp, event) do { \ - _TCHAR idbuf[] = _T("db.m00000000"); \ - _TCHAR *p = idbuf + 12; \ - u_int32_t id; \ - \ - for (id = (mutexp)->id; id != 0; id >>= 4) \ - *--p = hex_digits[id & 0xf]; \ - event = CreateEvent(NULL, FALSE, FALSE, idbuf); \ - if (event == NULL) \ - return (__os_get_errno()); \ -} while (0) + return (ret); +} /* * __db_win32_mutex_init -- - * Initialize a DB_MUTEX. + * Initialize a Win32 mutex. 
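An aside, not part of the patch: get_handle() above works around the fact that Win32 HANDLEs are per-process while the DB_MUTEX lives in shared memory, so waiter and waker each (re)create the same named auto-reset event on demand. A stripped-down sketch of that rendezvous; the name format and id come from the code above, the real code also passes explicit security attributes so services and local users can share the event, and error handling is elided:

	#include <windows.h>
	#include <stdio.h>

	/* Open (or create) the shared, named auto-reset event for a mutex. */
	static HANDLE
	mutex_event(unsigned int id)
	{
		char name[16];

		/* Same name in every process for a given mutex id. */
		snprintf(name, sizeof(name), "db.m%08x", id);
		return (CreateEventA(NULL, FALSE, FALSE, name));
	}

	/* Waiter, after failing to acquire the TAS word:
	 *	HANDLE ev = mutex_event(id);
	 *	WaitForSingleObject(ev, ms);
	 *	CloseHandle(ev);
	 *
	 * Unlocker, when waiters are registered:
	 *	HANDLE ev = mutex_event(id);
	 *	PulseEvent(ev);
	 *	CloseHandle(ev);
	 */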
* - * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t)); + * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, db_mutex_t, u_int32_t)); */ int -__db_win32_mutex_init(dbenv, mutexp, flags) +__db_win32_mutex_init(dbenv, mutex, flags) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; u_int32_t flags; { - u_int32_t save; - - /* - * The only setting/checking of the MUTEX_MPOOL flag is in the mutex - * mutex allocation code (__db_mutex_alloc/free). Preserve only that - * flag. This is safe because even if this flag was never explicitly - * set, but happened to be set in memory, it will never be checked or - * acted upon. - */ - save = F_ISSET(mutexp, MUTEX_MPOOL); - memset(mutexp, 0, sizeof(*mutexp)); - F_SET(mutexp, save); + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; - /* - * If this is a thread lock or the process has told us that there are - * no other processes in the environment, and the application isn't - * threaded, there aren't any threads to block. - */ - if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) { - if (!F_ISSET(dbenv, DB_ENV_THREAD)) { - F_SET(mutexp, MUTEX_IGNORE); - return (0); - } - } + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp); - F_SET(mutexp, MUTEX_INITED); + return (0); } /* * __db_win32_mutex_lock - * Lock on a mutex, logically blocking if necessary. + * Lock on a mutex, blocking if necessary. * - * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, db_mutex_t)); */ int -__db_win32_mutex_lock(dbenv, mutexp) +__db_win32_mutex_lock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; HANDLE event; u_int32_t nspins; - int ret, ms; + int ms, ret; #ifdef MUTEX_DIAG LARGE_INTEGER now; #endif - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); + event = NULL; ms = 50; ret = 0; loop: /* Attempt to acquire the resource for N spins. */ - for (nspins = dbenv->tas_spins; nspins > 0; --nspins) { - if (!MUTEX_SET(&mutexp->tas)) { + for (nspins = + mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) { + /* + * We can avoid the (expensive) interlocked instructions if + * the mutex is already "set". + */ + if (mutexp->tas || !MUTEX_SET(&mutexp->tas)) { /* * Some systems (notably those with newer Intel CPUs) * need a small pause here. [#6975] @@ -119,17 +142,26 @@ loop: /* Attempt to acquire the resource for N spins. 
*/ } #ifdef DIAGNOSTIC - if (mutexp->locked) - __db_err(dbenv, - "__db_win32_mutex_lock: mutex double-locked!"); - - __os_id(&mutexp->locked); + if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + char buf[DB_THREADID_STRLEN]; + __db_errx(dbenv, + "Win32 lock failed: mutex already locked by %s", + dbenv->thread_id_string(dbenv, + mutexp->pid, mutexp->tid, buf)); + return (__db_panic(dbenv, EACCES)); + } #endif + F_SET(mutexp, DB_MUTEX_LOCKED); + dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid); + CHECK_MTX_THREAD(dbenv, mutexp); +#ifdef HAVE_STATISTICS if (event == NULL) ++mutexp->mutex_set_nowait; - else { + else ++mutexp->mutex_set_wait; +#endif + if (event != NULL) { CloseHandle(event); InterlockedDecrement(&mutexp->nwaiters); #ifdef MUTEX_DIAG @@ -142,6 +174,15 @@ loop: /* Attempt to acquire the resource for N spins. */ #endif } +#ifdef DIAGNOSTIC + /* + * We want to switch threads as often as possible. Yield + * every time we get a mutex to ensure contention. + */ + if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) + __os_yield(dbenv); +#endif + return (0); } @@ -158,79 +199,92 @@ loop: /* Attempt to acquire the resource for N spins. */ now.QuadPart, mutexp, mutexp->id); #endif InterlockedIncrement(&mutexp->nwaiters); - GET_HANDLE(mutexp, event); + if ((ret = get_handle(dbenv, mutexp, &event)) != 0) + goto err; + } + if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) { + ret = __os_get_syserr(); + goto err; } - if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) - return (__os_get_errno()); if ((ms <<= 1) > MS_PER_SEC) ms = MS_PER_SEC; + PANIC_CHECK(dbenv); goto loop; + +err: __db_syserr(dbenv, ret, "Win32 lock failed"); + return (__db_panic(dbenv, __os_posix_err(ret))); } /* * __db_win32_mutex_unlock -- - * Release a lock. + * Release a mutex. * - * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *)); + * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, db_mutex_t)); */ int -__db_win32_mutex_unlock(dbenv, mutexp) +__db_win32_mutex_unlock(dbenv, mutex) DB_ENV *dbenv; - DB_MUTEX *mutexp; + db_mutex_t mutex; { - int ret; + DB_MUTEX *mutexp; + DB_MUTEXMGR *mtxmgr; + DB_MUTEXREGION *mtxregion; HANDLE event; + int ret; #ifdef MUTEX_DIAG - LARGE_INTEGER now; + LARGE_INTEGER now; #endif - - if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE)) + if (!MUTEX_ON(dbenv) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) return (0); -#ifdef DIAGNOSTIC - if (!mutexp->tas || !mutexp->locked) - __db_err(dbenv, - "__db_win32_mutex_unlock: ERROR: lock already unlocked"); + mtxmgr = dbenv->mutex_handle; + mtxregion = mtxmgr->reginfo.primary; + mutexp = MUTEXP_SET(mutex); - mutexp->locked = 0; +#ifdef DIAGNOSTIC + if (!mutexp->tas || !F_ISSET(mutexp, DB_MUTEX_LOCKED)) { + __db_errx(dbenv, "Win32 unlock failed: lock already unlocked"); + return (__db_panic(dbenv, EACCES)); + } #endif + F_CLR(mutexp, DB_MUTEX_LOCKED); MUTEX_UNSET(&mutexp->tas); - ret = 0; - if (mutexp->nwaiters > 0) { - GET_HANDLE(mutexp, event); + if ((ret = get_handle(dbenv, mutexp, &event)) != 0) + goto err; #ifdef MUTEX_DIAG QueryPerformanceCounter(&now); printf("[%I64d]: Signalling mutex %p, id %d\n", now.QuadPart, mutexp, mutexp->id); #endif - if (!PulseEvent(event)) - ret = __os_get_errno(); + if (!PulseEvent(event)) { + ret = __os_get_syserr(); + CloseHandle(event); + goto err; + } CloseHandle(event); } -#ifdef DIAGNOSTIC - if (ret != 0) - __db_err(dbenv, - "__db_win32_mutex_unlock: ERROR: unlock failed"); -#endif + return (0); - return (ret); +err: __db_syserr(dbenv, ret, "Win32 unlock failed"); + return 
(__db_panic(dbenv, __os_posix_err(ret))); } /* * __db_win32_mutex_destroy -- - * Destroy a DB_MUTEX - noop with this implementation. + * Destroy a mutex. * - * PUBLIC: int __db_win32_mutex_destroy __P((DB_MUTEX *)); + * PUBLIC: int __db_win32_mutex_destroy __P((DB_ENV *, db_mutex_t)); */ int -__db_win32_mutex_destroy(mutexp) - DB_MUTEX *mutexp; +__db_win32_mutex_destroy(dbenv, mutex) + DB_ENV *dbenv; + db_mutex_t mutex; { return (0); } diff --git a/db/mutex/tm.c b/db/mutex/tm.c index ebfb2f329..de647fa1f 100644 --- a/db/mutex/tm.c +++ b/db/mutex/tm.c @@ -1,50 +1,76 @@ /* * Standalone mutex tester for Berkeley DB mutexes. + * + * $Id: tm.c,v 12.14 2006/07/17 15:16:46 bostic Exp $ */ + #include "db_config.h" -#include <sys/types.h> -#include <sys/mman.h> -#include <sys/stat.h> -#include <sys/wait.h> +#include "db_int.h" -#include <errno.h> -#include <fcntl.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> +#include <sys/wait.h> #if defined(MUTEX_THREAD_TEST) #include <pthread.h> #endif -#include "db_int.h" +#ifdef DB_WIN32 +extern int getopt(int, char * const *, const char *); + +typedef HANDLE os_pid_t; +typedef HANDLE os_thread_t; -#ifndef HAVE_QNX -#define shm_open open -#define shm_unlink remove +#define os_thread_create(thrp, attr, func, arg) \ + (((*(thrp) = CreateThread(NULL, 0, \ + (LPTHREAD_START_ROUTINE)(func), (arg), 0, NULL)) == NULL) ? -1 : 0) +#define os_thread_join(thr, statusp) \ + ((WaitForSingleObject((thr), INFINITE) == WAIT_OBJECT_0) && \ + GetExitCodeThread((thr), (LPDWORD)(statusp)) ? 0 : -1) +#define os_thread_self() GetCurrentThreadId() + +#else /* !DB_WIN32 */ + +typedef pid_t os_pid_t; + +#ifdef MUTEX_THREAD_TEST +typedef pthread_t os_thread_t; #endif -void exec_proc(u_long, char *, char *); -void map_file(u_int8_t **, u_int8_t **, u_int8_t **, int *); -void tm_file_init(void); -void run_locker(u_long); -void *run_lthread(void *); -void run_wakeup(u_long); -void *run_wthread(void *); -void tm_mutex_destroy(void); -void tm_mutex_init(void); -void tm_mutex_stats(void); -void unmap_file(u_int8_t *, int); -int usage(void); - -#define MT_FILE "mutex.file" -#define MT_FILE_QUIT "mutex.file.quit" - -DB_ENV dbenv; /* Fake out DB. */ +#define os_thread_create(thrp, attr, func, arg) \ + pthread_create((thrp), (attr), (func), (arg)) +#define os_thread_join(thr, statusp) pthread_join((thr), (statusp)) +#define os_thread_self() pthread_self() +#endif + +#define OS_BAD_PID (os_pid_t)-1 + +#define TESTDIR "TESTDIR" /* Working area */ +#define MT_FILE "TESTDIR/mutex.file" +#define MT_FILE_QUIT "TESTDIR/mutex.file.quit" + +/* + * The backing file layout: + * TM[1] per-thread mutex array lock + * TM[nthreads] per-thread mutex array + * TM[maxlocks] per-lock mutex array + */ +typedef struct { + db_mutex_t mutex; /* Mutex. */ + u_long id; /* Holder's ID. */ + u_int wakeme; /* Request to awake. */ +} TM; + +DB_ENV *dbenv; /* Backing environment */ size_t len; /* Backing file size. */ -int align; /* Mutex alignment in file. */ + +u_int8_t *gm_addr; /* Global mutex */ +u_int8_t *lm_addr; /* Locker mutexes */ +u_int8_t *tm_addr; /* Thread mutexes */ + +#ifdef MUTEX_THREAD_TEST +os_thread_t *kidsp; /* Locker threads */ +os_thread_t wakep; /* Wakeup thread */ +#endif int maxlocks = 20; /* -l: Backing locks. */ int nlocks = 10000; /* -n: Locks per processes. */ @@ -52,12 +78,24 @@ int nprocs = 20; /* -p: Processes. */ int nthreads = 1; /* -t: Threads. */ int verbose; /* -v: Verbosity. */ -typedef struct { - DB_MUTEX mutex; /* Mutex. 
*/ - u_long id; /* Holder's ID. */ -#define MUTEX_WAKEME 0x01 /* Request to awake. */ - u_int flags; -} TM; +int locker_start(u_long); +int locker_wait(void); +void map_file(u_int8_t **, u_int8_t **, u_int8_t **, DB_FH **); +os_pid_t os_spawn(const char *, char *const[]); +int os_wait(os_pid_t *, int); +void *run_lthread(void *); +void *run_wthread(void *); +os_pid_t spawn_proc(u_long, char *, char *); +void tm_env_close(void); +int tm_env_init(void); +void tm_file_init(void); +void tm_mutex_destroy(void); +void tm_mutex_init(void); +void tm_mutex_stats(void); +void unmap_file(u_int8_t *, DB_FH *); +int usage(void); +int wakeup_start(u_long); +int wakeup_wait(void); int main(argc, argv) @@ -67,12 +105,11 @@ main(argc, argv) enum {LOCKER, WAKEUP, PARENT} rtype; extern int optind; extern char *optarg; - pid_t pid; + os_pid_t wakeup_pid, *pids; u_long id; - int ch, fd, eval, i, status; - char *p, *tmpath; - - __os_spin(&dbenv); /* Fake out DB. */ + DB_FH *fhp, *map_fhp; + int ch, err, i; + char *p, *tmpath, cmd[1024]; rtype = PARENT; id = 0; @@ -122,173 +159,211 @@ main(argc, argv) argv += optind; /* - * The file layout: - * TM[1] per-thread mutex array lock - * TM[nthreads] per-thread mutex array - * TM[maxlocks] per-lock mutex array + * If we're not running a multi-process test, we should be running + * a multi-thread test. */ - align = DB_ALIGN(sizeof(TM), MUTEX_ALIGN); - len = align * (1 + nthreads * nprocs + maxlocks); - - switch (rtype) { - case PARENT: - break; - case LOCKER: - run_locker(id); - return (EXIT_SUCCESS); - case WAKEUP: - run_wakeup(id); - return (EXIT_SUCCESS); + if (nprocs == 1 && nthreads == 1) { + fprintf(stderr, + "tm: running in a single process requires multiple threads\n"); + return (EXIT_FAILURE); + } + + len = sizeof(TM) * (1 + nthreads * nprocs + maxlocks); + + /* + * In the multi-process test, the parent spawns processes that exec + * the original binary, ending up here. Each process joins the DB + * environment separately and then calls the supporting function. + */ + if (rtype == LOCKER || rtype == WAKEUP) { + __os_sleep(dbenv, 3, 0); /* Let everyone catch up. */ + /* Initialize random numbers. */ + srand((u_int)time(NULL) % getpid()); + + if (tm_env_init() != 0) /* Join the environment. */ + exit(EXIT_FAILURE); + /* Join the backing file. */ + map_file(&gm_addr, &tm_addr, &lm_addr, &map_fhp); + if (verbose) + printf( + "Backing file: global (%#lx), threads (%#lx), locks (%#lx)\n", + (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr); + + if ((rtype == LOCKER ? + locker_start(id) : wakeup_start(id)) != 0) + exit(EXIT_FAILURE); + if ((rtype == LOCKER ? locker_wait() : wakeup_wait()) != 0) + exit(EXIT_FAILURE); + + unmap_file(gm_addr, map_fhp); /* Detach from backing file. */ + + tm_env_close(); /* Detach from environment. */ + + exit(EXIT_SUCCESS); } + /* + * The following code is only executed by the original parent process. + * + * Clean up from any previous runs. + */ + snprintf(cmd, sizeof(cmd), "rm -rf %s", TESTDIR); + (void)system(cmd); + snprintf(cmd, sizeof(cmd), "mkdir %s", TESTDIR); + (void)system(cmd); + printf( "tm: %d processes, %d threads/process, %d lock requests from %d locks\n", nprocs, nthreads, nlocks, maxlocks); - printf( - "tm: mutex alignment %lu, structure alignment %d, backing file %lu bytes\n", - (u_long)MUTEX_ALIGN, align, (u_long)len); + printf("tm: backing file %lu bytes\n", (u_long)len); + + if (tm_env_init() != 0) /* Create the environment. */ + exit(EXIT_FAILURE); tm_file_init(); /* Initialize backing file. 
*/ - tm_mutex_init(); /* Initialize file's mutexes. */ - - for (i = 0; i < nprocs; ++i) { - switch (fork()) { - case -1: - perror("fork"); - return (EXIT_FAILURE); - case 0: - exec_proc(id, tmpath, "locker"); - break; - default: - break; + + /* Map in the backing file. */ + map_file(&gm_addr, &tm_addr, &lm_addr, &map_fhp); + if (verbose) + printf( + "backing file: global (%#lx), threads (%#lx), locks (%#lx)\n", + (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr); + + tm_mutex_init(); /* Initialize mutexes. */ + + if (nprocs > 1) { /* Run the multi-process test. */ + /* Allocate array of locker process IDs. */ + if ((pids = calloc(nprocs, sizeof(os_pid_t))) == NULL) { + fprintf(stderr, "tm: %s\n", strerror(errno)); + goto fail; } - id += nthreads; - } - (void)remove(MT_FILE_QUIT); + /* Spawn locker processes and threads. */ + for (i = 0; i < nprocs; ++i) { + if ((pids[i] = + spawn_proc(id, tmpath, "locker")) == OS_BAD_PID) { + fprintf(stderr, + "tm: failed to spawn a locker\n"); + goto fail; + } + id += nthreads; + } - switch (fork()) { - case -1: - perror("fork"); - return (EXIT_FAILURE); - case 0: - exec_proc(id, tmpath, "wakeup"); - break; - default: - break; - } - ++id; + /* Spawn wakeup process/thread. */ + if ((wakeup_pid = + spawn_proc(id, tmpath, "wakeup")) == OS_BAD_PID) { + fprintf(stderr, "tm: failed to spawn waker\n"); + goto fail; + } + ++id; - /* Wait for locking threads. */ - for (i = 0, eval = EXIT_SUCCESS; i < nprocs; ++i) - if ((pid = wait(&status)) != (pid_t)-1) { - fprintf(stderr, - "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status)); - if (WEXITSTATUS(status) != 0) - eval = EXIT_FAILURE; + /* Wait for all lockers to exit. */ + if ((err = os_wait(pids, nprocs)) != 0) { + fprintf(stderr, "locker wait failed with %d\n", err); + goto fail; } - /* Signal wakeup thread to exit. */ - if ((fd = open(MT_FILE_QUIT, O_WRONLY | O_CREAT, 0664)) == -1) { - fprintf(stderr, "tm: %s\n", strerror(errno)); - status = EXIT_FAILURE; - } - (void)close(fd); + /* Signal wakeup process to exit. */ + if ((err = __os_open( + dbenv, MT_FILE_QUIT, DB_OSO_CREATE, 0664, &fhp)) != 0) { + fprintf(stderr, "tm: open %s\n", db_strerror(err)); + goto fail; + } + (void)__os_closehandle(dbenv, fhp); - /* Wait for wakeup thread. */ - if ((pid = wait(&status)) != (pid_t)-1) { - fprintf(stderr, - "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status)); - if (WEXITSTATUS(status) != 0) - eval = EXIT_FAILURE; - } + /* Wait for wakeup process/thread. */ + if ((err = os_wait(&wakeup_pid, 1)) != 0) { + fprintf(stderr, + "%lu: exited %d\n", (u_long)wakeup_pid, err); + goto fail; + } + } else { /* Run the single-process test. */ + /* Spawn locker threads. */ + if (locker_start(0) != 0) + goto fail; + + /* Spawn wakeup thread. */ + if (wakeup_start(nthreads) != 0) + goto fail; + + /* Wait for all lockers to exit. */ + if (locker_wait() != 0) + goto fail; + + /* Signal wakeup process to exit. */ + if ((err = __os_open( + dbenv, MT_FILE_QUIT, DB_OSO_CREATE, 0664, &fhp)) != 0) { + fprintf(stderr, "tm: open %s\n", db_strerror(err)); + goto fail; + } + (void)__os_closehandle(dbenv, fhp); - (void)remove(MT_FILE_QUIT); + /* Wait for wakeup thread. */ + if (wakeup_wait() != 0) + goto fail; + } - tm_mutex_stats(); /* Display run statistics. */ - tm_mutex_destroy(); /* Destroy region. */ + tm_mutex_stats(); /* Display run statistics. */ + tm_mutex_destroy(); /* Destroy mutexes. */ - printf("tm: exit status: %s\n", - eval == EXIT_SUCCESS ? 
"success" : "failed!"); - return (eval); -} + unmap_file(gm_addr, map_fhp); /* Detach from backing file. */ -void -exec_proc(id, tmpath, typearg) - u_long id; - char *tmpath, *typearg; -{ - char *argv[10], **ap, b_l[10], b_n[10], b_p[10], b_t[10], b_T[10]; - - ap = &argv[0]; - *ap++ = "tm"; - sprintf(b_l, "-l%d", maxlocks); - *ap++ = b_l; - sprintf(b_n, "-n%d", nlocks); - *ap++ = b_p; - sprintf(b_p, "-p%d", nprocs); - *ap++ = b_n; - sprintf(b_t, "-t%d", nthreads); - *ap++ = b_t; - sprintf(b_T, "-T%s=%lu", typearg, id); - *ap++ = b_T; - if (verbose) - *ap++ = "-v"; + tm_env_close(); /* Detach from environment. */ - *ap = NULL; - execvp(tmpath, argv); + printf("tm: test succeeded\n"); + return (EXIT_SUCCESS); - fprintf(stderr, "%s: %s\n", tmpath, strerror(errno)); - exit(EXIT_FAILURE); +fail: printf("tm: FAILED!\n"); + return (EXIT_FAILURE); } -void -run_locker(id) +int +locker_start(id) u_long id; { #if defined(MUTEX_THREAD_TEST) - pthread_t *kidsp; - int i; - void *retp; -#endif - int status; + int err, i; - __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ - - srand((u_int)time(NULL) % getpid()); /* Initialize random numbers. */ - -#if defined(MUTEX_THREAD_TEST) /* * Spawn off threads. We have nthreads all locking and going to * sleep, and one other thread cycling through and waking them up. */ if ((kidsp = - (pthread_t *)calloc(sizeof(pthread_t), nthreads)) == NULL) { + (os_thread_t *)calloc(sizeof(os_thread_t), nthreads)) == NULL) { fprintf(stderr, "tm: %s\n", strerror(errno)); - exit(EXIT_FAILURE); + return (1); } for (i = 0; i < nthreads; i++) - if ((errno = pthread_create( + if ((err = os_thread_create( &kidsp[i], NULL, run_lthread, (void *)(id + i))) != 0) { fprintf(stderr, "tm: failed spawning thread: %s\n", - strerror(errno)); - exit(EXIT_FAILURE); + db_strerror(err)); + return (1); } + return (0); +#else + return (run_lthread((void *)id) == NULL ? 0 : 1); +#endif +} + +int +locker_wait() +{ +#if defined(MUTEX_THREAD_TEST) + int i; + void *retp; /* Wait for the threads to exit. */ - status = EXIT_SUCCESS; for (i = 0; i < nthreads; i++) { - pthread_join(kidsp[i], &retp); + os_thread_join(kidsp[i], &retp); if (retp != NULL) { fprintf(stderr, "tm: thread exited with error\n"); - status = EXIT_FAILURE; + return (1); } } free(kidsp); -#else - status = (int)run_lthread((void *)id); #endif - exit(status); + return (0); } void * @@ -297,49 +372,38 @@ run_lthread(arg) { TM *gp, *mp, *tp; u_long id, tid; - int fd, i, lock, nl, remap; - u_int8_t *gm_addr, *lm_addr, *tm_addr; + int err, i, lock, nl; - id = (int)arg; + id = (uintptr_t)arg; #if defined(MUTEX_THREAD_TEST) - tid = (u_long)pthread_self(); + tid = (u_long)os_thread_self(); #else tid = 0; #endif printf("Locker: ID %03lu (PID: %lu; TID: %lx)\n", id, (u_long)getpid(), tid); - nl = nlocks; - for (gm_addr = NULL, gp = tp = NULL, remap = 0;;) { - /* Map in the file as necessary. */ - if (gm_addr == NULL) { - map_file(&gm_addr, &tm_addr, &lm_addr, &fd); - gp = (TM *)gm_addr; - tp = (TM *)(tm_addr + id * align); - if (verbose) - printf( - "%03lu: map threads @ %#lx; locks @ %#lx\n", - id, (u_long)tm_addr, (u_long)lm_addr); - remap = (rand() % 100) + 35; - } + gp = (TM *)gm_addr; + tp = (TM *)(tm_addr + id * sizeof(TM)); + for (nl = nlocks; nl > 0;) { /* Select and acquire a data lock. 
*/ lock = rand() % maxlocks; - mp = (TM *)(lm_addr + lock * align); + mp = (TM *)(lm_addr + lock * sizeof(TM)); if (verbose) - printf("%03lu: lock %d @ %#lx\n", - id, lock, (u_long)&mp->mutex); + printf("%03lu: lock %d (mtx: %lu)\n", + id, lock, (u_long)mp->mutex); - if (__db_mutex_lock(&dbenv, &mp->mutex)) { + if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) { fprintf(stderr, "%03lu: never got lock %d: %s\n", - id, lock, strerror(errno)); - return ((void *)EXIT_FAILURE); + id, lock, db_strerror(err)); + return ((void *)1); } if (mp->id != 0) { fprintf(stderr, "RACE! (%03lu granted lock %d held by %03lu)\n", id, lock, mp->id); - return ((void *)EXIT_FAILURE); + return ((void *)1); } mp->id = id; @@ -348,12 +412,12 @@ run_lthread(arg) * we still hold the mutex. */ for (i = 0; i < 3; ++i) { - __os_sleep(&dbenv, 0, rand() % 3); + __os_sleep(dbenv, 0, rand() % 3); if (mp->id != id) { fprintf(stderr, "RACE! (%03lu stole lock %d from %03lu)\n", mp->id, lock, id); - return ((void *)EXIT_FAILURE); + return ((void *)1); } } @@ -367,112 +431,106 @@ run_lthread(arg) * * The wakeup thread will wake us up. */ - if (__db_mutex_lock(&dbenv, &gp->mutex)) { + if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) { fprintf(stderr, - "%03lu: global lock: %s\n", id, strerror(errno)); - return ((void *)EXIT_FAILURE); + "%03lu: global lock: %s\n", id, db_strerror(err)); + return ((void *)1); } if (tp->id != 0 && tp->id != id) { fprintf(stderr, "%03lu: per-thread mutex isn't mine, owned by %03lu\n", id, tp->id); - return ((void *)EXIT_FAILURE); + return ((void *)1); } tp->id = id; if (verbose) - printf("%03lu: self-blocking\n", id); - if (F_ISSET(tp, MUTEX_WAKEME)) { + printf("%03lu: self-blocking (mtx: %lu)\n", + id, (u_long)tp->mutex); + if (tp->wakeme) { fprintf(stderr, "%03lu: wakeup flag incorrectly set\n", id); - return ((void *)EXIT_FAILURE); + return ((void *)1); } - F_SET(tp, MUTEX_WAKEME); - if (__db_mutex_unlock(&dbenv, &gp->mutex)) { + tp->wakeme = 1; + if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) { fprintf(stderr, - "%03lu: global unlock: %s\n", id, strerror(errno)); - return ((void *)EXIT_FAILURE); + "%03lu: global unlock: %s\n", id, db_strerror(err)); + return ((void *)1); } - if (__db_mutex_lock(&dbenv, &tp->mutex)) { + if ((err = dbenv->mutex_lock(dbenv, tp->mutex)) != 0) { fprintf(stderr, "%03lu: per-thread lock: %s\n", - id, strerror(errno)); - return ((void *)EXIT_FAILURE); + id, db_strerror(err)); + return ((void *)1); } /* Time passes... */ - if (F_ISSET(tp, MUTEX_WAKEME)) { + if (tp->wakeme) { fprintf(stderr, "%03lu: wakeup flag not cleared\n", id); - return ((void *)EXIT_FAILURE); + return ((void *)1); } if (verbose) - printf("%03lu: release %d @ %#lx\n", - id, lock, (u_long)&mp->mutex); + printf("%03lu: release %d (mtx: %lu)\n", + id, lock, (u_long)mp->mutex); /* Release the data lock. */ mp->id = 0; - if (__db_mutex_unlock(&dbenv, &mp->mutex)) { + if ((err = dbenv->mutex_unlock(dbenv, mp->mutex)) != 0) { fprintf(stderr, - "%03lu: lock release: %s\n", id, strerror(errno)); - return ((void *)EXIT_FAILURE); + "%03lu: lock release: %s\n", id, db_strerror(err)); + return ((void *)1); } - if (--nl % 100 == 0) + if (--nl % 100 == 0) { fprintf(stderr, "%03lu: %d\n", id, nl); - - if (nl == 0 || --remap == 0) { - if (verbose) - printf("%03lu: re-mapping\n", id); - unmap_file(gm_addr, fd); - gm_addr = NULL; - - if (nl == 0) - break; - - __os_sleep(&dbenv, 0, rand() % 500); + /* + * Windows buffers stderr and the output looks wrong + * without this. 
+ */ + fflush(stderr); } } return (NULL); } -void -run_wakeup(id) +int +wakeup_start(id) u_long id; { #if defined(MUTEX_THREAD_TEST) - pthread_t wakep; - int status; - void *retp; -#endif - __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */ - - srand((u_int)time(NULL) % getpid()); /* Initialize random numbers. */ + int err; -#if defined(MUTEX_THREAD_TEST) /* * Spawn off wakeup thread. */ - if ((errno = pthread_create( + if ((err = os_thread_create( &wakep, NULL, run_wthread, (void *)id)) != 0) { fprintf(stderr, "tm: failed spawning wakeup thread: %s\n", - strerror(errno)); - exit(EXIT_FAILURE); + db_strerror(err)); + return (1); } + return (0); +#else + return (run_wthread((void *)id) == NULL ? 0 : 1); +#endif +} + +int +wakeup_wait() +{ +#if defined(MUTEX_THREAD_TEST) + void *retp; /* - * run_locker will create a file when the wakeup thread is no - * longer needed. + * A file is created when the wakeup thread is no longer needed. */ - status = 0; - pthread_join(wakep, &retp); + os_thread_join(wakep, &retp); if (retp != NULL) { fprintf(stderr, "tm: wakeup thread exited with error\n"); - status = EXIT_FAILURE; + return (1); } - - exit(status); -#else - exit((int)run_wthread((void *)id)); #endif + return (0); } /* @@ -483,31 +541,25 @@ void * run_wthread(arg) void *arg; { - struct stat sb; TM *gp, *tp; u_long id, tid; - int fd, check_id; - u_int8_t *gm_addr, *tm_addr; + int check_id, err; - id = (int)arg; + id = (uintptr_t)arg; #if defined(MUTEX_THREAD_TEST) - tid = (u_long)pthread_self(); + tid = (u_long)os_thread_self(); #else tid = 0; #endif printf("Wakeup: ID %03lu (PID: %lu; TID: %lx)\n", id, (u_long)getpid(), tid); - arg = NULL; - map_file(&gm_addr, &tm_addr, NULL, &fd); - if (verbose) - printf("%03lu: map threads @ %#lx\n", id, (u_long)tm_addr); gp = (TM *)gm_addr; /* Loop, waking up sleepers and periodically sleeping ourselves. */ for (check_id = 0;; ++check_id) { /* Check to see if the locking threads have finished. */ - if (stat(MT_FILE_QUIT, &sb) == 0) + if (__os_exists(dbenv, MT_FILE_QUIT, NULL) == 0) break; /* Check for ID wraparound. */ @@ -515,69 +567,128 @@ run_wthread(arg) check_id = 0; /* Check for a thread that needs a wakeup. */ - tp = (TM *)(tm_addr + check_id * align); - if (!F_ISSET(tp, MUTEX_WAKEME)) + tp = (TM *)(tm_addr + check_id * sizeof(TM)); + if (!tp->wakeme) continue; - if (verbose) - printf("%03lu: wakeup thread %03lu @ %#lx\n", - id, tp->id, (u_long)&tp->mutex); + if (verbose) { + printf("%03lu: wakeup thread %03lu (mtx: %lu)\n", + id, tp->id, (u_long)tp->mutex); + fflush(stdout); + } /* Acquire the global lock. 
*/ - if (__db_mutex_lock(&dbenv, &gp->mutex)) { + if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) { fprintf(stderr, - "wakeup: global lock: %s\n", strerror(errno)); - return ((void *)EXIT_FAILURE); + "wakeup: global lock: %s\n", db_strerror(err)); + return ((void *)1); } - F_CLR(tp, MUTEX_WAKEME); - if (__db_mutex_unlock(&dbenv, &tp->mutex)) { + tp->wakeme = 0; + if ((err = dbenv->mutex_unlock(dbenv, tp->mutex)) != 0) { fprintf(stderr, - "wakeup: unlock: %s\n", strerror(errno)); - return ((void *)EXIT_FAILURE); + "wakeup: unlock: %s\n", db_strerror(err)); + return ((void *)1); } - if (__db_mutex_unlock(&dbenv, &gp->mutex)) { + if ((err = dbenv->mutex_unlock(dbenv, gp->mutex))) { fprintf(stderr, - "wakeup: global unlock: %s\n", strerror(errno)); - return ((void *)EXIT_FAILURE); + "wakeup: global unlock: %s\n", db_strerror(err)); + return ((void *)1); } - __os_sleep(&dbenv, 0, rand() % 3); + __os_sleep(dbenv, 0, rand() % 3); } return (NULL); } /* + * tm_env_init -- + * Create the backing database environment. + */ +int +tm_env_init() +{ + u_int32_t flags; + int ret; + char *home; + + /* + * Create an environment object and initialize it for error + * reporting. + */ + if ((ret = db_env_create(&dbenv, 0)) != 0) { + fprintf(stderr, "tm: %s\n", db_strerror(ret)); + return (1); + } + dbenv->set_errfile(dbenv, stderr); + dbenv->set_errpfx(dbenv, "tm"); + + /* Allocate enough mutexes. */ + if ((ret = dbenv->mutex_set_increment(dbenv, + 1 + nthreads * nprocs + maxlocks)) != 0) { + dbenv->err(dbenv, ret, "dbenv->mutex_set_increment"); + return (1); + } + + flags = DB_CREATE; + if (nprocs == 1) { + home = NULL; + flags |= DB_PRIVATE; + } else + home = TESTDIR; + if (nthreads != 1) + flags |= DB_THREAD; + if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) { + dbenv->err(dbenv, ret, "environment open: %s", home); + return (1); + } + + return (0); +} + +/* + * tm_env_close -- + * Close the backing database environment. + */ +void +tm_env_close() +{ + (void)dbenv->close(dbenv, 0); +} + +/* * tm_file_init -- * Initialize the backing file. */ void tm_file_init() { - int fd; + DB_FH *fhp; + int err; + size_t nwrite; /* Initialize the backing file. 
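An aside, not part of the patch: the heart of the test is the handshake between run_lthread() and run_wthread() above. The per-thread mutexes are allocated with DB_MUTEX_SELF_BLOCK and locked once at setup (see tm_mutex_init() below), so a locker can block on its own mutex until the wakeup thread releases it. Condensed to its essentials, with the names used in the test and error checks elided:

	/* Locker: publish the request, then block on our own mutex. */
	dbenv->mutex_lock(dbenv, gp->mutex);	/* global mutex guards wakeme */
	tp->wakeme = 1;
	dbenv->mutex_unlock(dbenv, gp->mutex);
	dbenv->mutex_lock(dbenv, tp->mutex);	/* already held: sleep here */

	/* Wakeup thread: clear the flag and release the sleeper. */
	dbenv->mutex_lock(dbenv, gp->mutex);
	tp->wakeme = 0;
	dbenv->mutex_unlock(dbenv, tp->mutex);	/* wakes the blocked locker */
	dbenv->mutex_unlock(dbenv, gp->mutex);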
*/ if (verbose) printf("Create the backing file.\n"); - (void)shm_unlink(MT_FILE); + (void)unlink(MT_FILE); - if ((fd = shm_open( - MT_FILE, O_CREAT | O_RDWR | O_TRUNC, - S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) == -1) { + if ((err = __os_open(dbenv, MT_FILE, + DB_OSO_CREATE | DB_OSO_TRUNC, 0666, &fhp)) == -1) { (void)fprintf(stderr, - "%s: open: %s\n", MT_FILE, strerror(errno)); + "%s: open: %s\n", MT_FILE, db_strerror(err)); exit(EXIT_FAILURE); } - if (lseek(fd, - (off_t)len, SEEK_SET) != (off_t)len || write(fd, &fd, 1) != 1) { + if ((err = __os_seek(dbenv, fhp, 0, 0, len)) != 0 || + (err = __os_write(dbenv, fhp, &err, 1, &nwrite)) != 0 || + nwrite != 1) { (void)fprintf(stderr, - "%s: seek/write: %s\n", MT_FILE, strerror(errno)); + "%s: seek/write: %s\n", MT_FILE, db_strerror(err)); exit(EXIT_FAILURE); } - (void)close(fd); + (void)__os_closehandle(dbenv, fhp); } /* @@ -588,59 +699,59 @@ void tm_mutex_init() { TM *mp; - int fd, i; - u_int8_t *gm_addr, *lm_addr, *tm_addr; - - map_file(&gm_addr, &tm_addr, &lm_addr, &fd); - if (verbose) - printf("init: map threads @ %#lx; locks @ %#lx\n", - (u_long)tm_addr, (u_long)lm_addr); + int err, i; if (verbose) - printf("Initialize the global mutex:\n"); + printf("Allocate the global mutex: "); mp = (TM *)gm_addr; - if (__db_mutex_init_int(&dbenv, &mp->mutex, 0, 0)) { + if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) { fprintf(stderr, - "__db_mutex_init (global): %s\n", strerror(errno)); + "DB_ENV->mutex_alloc (global): %s\n", db_strerror(err)); exit(EXIT_FAILURE); } if (verbose) - printf("\t@ %#lx\n", (u_long)&mp->mutex); + printf("%lu\n", (u_long)mp->mutex); if (verbose) printf( - "Initialize %d per-thread mutexes:\n", nthreads * nprocs); + "Allocate %d per-thread, self-blocking mutexes: ", + nthreads * nprocs); for (i = 0; i < nthreads * nprocs; ++i) { - mp = (TM *)(tm_addr + i * align); - if (__db_mutex_init_int( - &dbenv, &mp->mutex, 0, MUTEX_SELF_BLOCK)) { - fprintf(stderr, "__db_mutex_init (per-thread %d): %s\n", - i, strerror(errno)); + mp = (TM *)(tm_addr + i * sizeof(TM)); + if ((err = dbenv->mutex_alloc( + dbenv, DB_MUTEX_SELF_BLOCK, &mp->mutex)) != 0) { + fprintf(stderr, + "DB_ENV->mutex_alloc (per-thread %d): %s\n", + i, db_strerror(err)); exit(EXIT_FAILURE); } - if (__db_mutex_lock(&dbenv, &mp->mutex)) { - fprintf(stderr, "__db_mutex_lock (per-thread %d): %s\n", - i, strerror(errno)); + if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) { + fprintf(stderr, + "DB_ENV->mutex_lock (per-thread %d): %s\n", + i, db_strerror(err)); exit(EXIT_FAILURE); } if (verbose) - printf("\t@ %#lx\n", (u_long)&mp->mutex); + printf("%lu ", (u_long)mp->mutex); } + if (verbose) + printf("\n"); if (verbose) - printf("Initialize %d per-lock mutexes:\n", maxlocks); + printf("Allocate %d per-lock mutexes: ", maxlocks); for (i = 0; i < maxlocks; ++i) { - mp = (TM *)(lm_addr + i * align); - if (__db_mutex_init_int(&dbenv, &mp->mutex, 0, 0)) { - fprintf(stderr, "__db_mutex_init (per-lock: %d): %s\n", - i, strerror(errno)); + mp = (TM *)(lm_addr + i * sizeof(TM)); + if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) { + fprintf(stderr, + "DB_ENV->mutex_alloc (per-lock: %d): %s\n", + i, db_strerror(err)); exit(EXIT_FAILURE); } if (verbose) - printf("\t@ %#lx\n", (u_long)&mp->mutex); + printf("%lu ", (u_long)mp->mutex); } - - unmap_file(gm_addr, fd); + if (verbose) + printf("\n"); } /* @@ -651,28 +762,25 @@ void tm_mutex_destroy() { TM *gp, *mp; - int fd, i; - u_int8_t *gm_addr, *lm_addr, *tm_addr; - - map_file(&gm_addr, 
&tm_addr, &lm_addr, &fd); + int err, i; if (verbose) printf("Destroy the global mutex.\n"); gp = (TM *)gm_addr; - if (__db_mutex_destroy(&gp->mutex)) { + if ((err = dbenv->mutex_free(dbenv, gp->mutex)) != 0) { fprintf(stderr, - "__db_mutex_destroy (global): %s\n", strerror(errno)); + "DB_ENV->mutex_free (global): %s\n", db_strerror(err)); exit(EXIT_FAILURE); } if (verbose) printf("Destroy the per-thread mutexes.\n"); for (i = 0; i < nthreads * nprocs; ++i) { - mp = (TM *)(tm_addr + i * align); - if (__db_mutex_destroy(&mp->mutex)) { + mp = (TM *)(tm_addr + i * sizeof(TM)); + if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) { fprintf(stderr, - "__db_mutex_destroy (per-thread %d): %s\n", - i, strerror(errno)); + "DB_ENV->mutex_free (per-thread %d): %s\n", + i, db_strerror(err)); exit(EXIT_FAILURE); } } @@ -680,18 +788,16 @@ tm_mutex_destroy() if (verbose) printf("Destroy the per-lock mutexes.\n"); for (i = 0; i < maxlocks; ++i) { - mp = (TM *)(tm_addr + i * align); - if (__db_mutex_destroy(&mp->mutex)) { + mp = (TM *)(lm_addr + i * sizeof(TM)); + if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) { fprintf(stderr, - "__db_mutex_destroy (per-lock: %d): %s\n", - i, strerror(errno)); + "DB_ENV->mutex_free (per-lock: %d): %s\n", + i, db_strerror(err)); exit(EXIT_FAILURE); } } - unmap_file(gm_addr, fd); - - (void)shm_unlink(MT_FILE); + (void)unlink(MT_FILE); } /* @@ -701,21 +807,19 @@ tm_mutex_destroy() void tm_mutex_stats() { +#ifdef HAVE_STATISTICS TM *mp; - int fd, i; - u_int8_t *gm_addr, *lm_addr; - - map_file(&gm_addr, NULL, &lm_addr, &fd); + int i; + u_int32_t set_wait, set_nowait; printf("Per-lock mutex statistics.\n"); for (i = 0; i < maxlocks; ++i) { - mp = (TM *)(lm_addr + i * align); + mp = (TM *)(lm_addr + i * sizeof(TM)); + __mutex_set_wait_info(dbenv, mp->mutex, &set_wait, &set_nowait); printf("mutex %2d: wait: %lu; no wait %lu\n", i, - (u_long)mp->mutex.mutex_set_wait, - (u_long)mp->mutex.mutex_set_nowait); + (u_long)set_wait, (u_long)set_nowait); } - - unmap_file(gm_addr, fd); +#endif } /* @@ -723,12 +827,13 @@ tm_mutex_stats() * Map in the backing file. 
*/ void -map_file(gm_addrp, tm_addrp, lm_addrp, fdp) +map_file(gm_addrp, tm_addrp, lm_addrp, fhpp) u_int8_t **gm_addrp, **tm_addrp, **lm_addrp; - int *fdp; + DB_FH **fhpp; { void *addr; - int fd; + DB_FH *fhp; + int err; #ifndef MAP_FAILED #define MAP_FAILED (void *)-1 @@ -736,29 +841,24 @@ map_file(gm_addrp, tm_addrp, lm_addrp, fdp) #ifndef MAP_FILE #define MAP_FILE 0 #endif - if ((fd = shm_open(MT_FILE, O_RDWR, 0)) == -1) { - fprintf(stderr, "%s: open %s\n", MT_FILE, strerror(errno)); + if ((err = __os_open(dbenv, MT_FILE, 0, 0, &fhp)) != 0) { + fprintf(stderr, "%s: open %s\n", MT_FILE, db_strerror(err)); exit(EXIT_FAILURE); } - addr = mmap(NULL, len, - PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, (off_t)0); - if (addr == MAP_FAILED) { - fprintf(stderr, "%s: mmap: %s\n", MT_FILE, strerror(errno)); + if ((err = __os_mapfile(dbenv, MT_FILE, fhp, len, 0, &addr)) != 0) { + fprintf(stderr, "%s: mmap: %s\n", MT_FILE, db_strerror(err)); exit(EXIT_FAILURE); } - if (gm_addrp != NULL) - *gm_addrp = (u_int8_t *)addr; - addr = (u_int8_t *)addr + align; - if (tm_addrp != NULL) - *tm_addrp = (u_int8_t *)addr; - addr = (u_int8_t *)addr + align * (nthreads * nprocs); - if (lm_addrp != NULL) - *lm_addrp = (u_int8_t *)addr; - - if (fdp != NULL) - *fdp = fd; + *gm_addrp = (u_int8_t *)addr; + addr = (u_int8_t *)addr + sizeof(TM); + *tm_addrp = (u_int8_t *)addr; + addr = (u_int8_t *)addr + sizeof(TM) * (nthreads * nprocs); + *lm_addrp = (u_int8_t *)addr; + + if (fhpp != NULL) + *fhpp = fhp; } /* @@ -766,16 +866,18 @@ map_file(gm_addrp, tm_addrp, lm_addrp, fdp) * Discard backing file map. */ void -unmap_file(addr, fd) +unmap_file(addr, fhp) u_int8_t *addr; - int fd; + DB_FH *fhp; { - if (munmap(addr, len) != 0) { - fprintf(stderr, "munmap: %s\n", strerror(errno)); + int err; + + if ((err = __os_unmapfile(dbenv, addr, len)) != 0) { + fprintf(stderr, "munmap: %s\n", db_strerror(err)); exit(EXIT_FAILURE); } - if (close(fd) != 0) { - fprintf(stderr, "close: %s\n", strerror(errno)); + if ((err = __os_closehandle(dbenv, fhp)) != 0) { + fprintf(stderr, "close: %s\n", db_strerror(err)); exit(EXIT_FAILURE); } } @@ -792,3 +894,117 @@ usage() "[-n locks] [-p procs] [-T locker=ID|wakeup=ID] [-t threads]"); return (EXIT_FAILURE); } + +/* + * os_wait -- + * Wait for an array of N procs. + */ +int +os_wait(procs, nprocs) + os_pid_t *procs; + int nprocs; +{ + int i, status; +#if defined(DB_WIN32) + DWORD ret; +#endif + + status = 0; + +#if defined(DB_WIN32) + do { + ret = WaitForMultipleObjects(nprocs, procs, FALSE, INFINITE); + i = ret - WAIT_OBJECT_0; + if (i < 0 || i >= nprocs) + return (__os_posix_err(__os_get_syserr())); + + if ((GetExitCodeProcess(procs[i], &ret) == 0) || (ret != 0)) + return (ret); + + /* remove the process handle from the list */ + while (++i < nprocs) + procs[i - 1] = procs[i]; + } while (--nprocs); +#elif !defined(HAVE_VXWORKS) + do { + if ((i = wait(&status)) == -1) + return (__os_posix_err(__os_get_syserr())); + + if (WIFEXITED(status) == 0 || WEXITSTATUS(status) != 0) { + for (i = 0; i < nprocs; i++) + kill(procs[i], SIGKILL); + return (WEXITSTATUS(status)); + } + } while (--nprocs); +#endif + + return (0); +} + +os_pid_t +spawn_proc(id, tmpath, typearg) + u_long id; + char *tmpath, *typearg; +{ + char lbuf[16], nbuf[16], pbuf[16], tbuf[16], Tbuf[256]; + char *const vbuf = verbose ? 
"-v" : NULL; + char *args[] = { NULL /* tmpath */, + "-l", NULL /* lbuf */, "-n", NULL /* nbuf */, + "-p", NULL /* pbuf */, "-t", NULL /* tbuf */, + "-T", NULL /* Tbuf */, NULL /* vbuf */, + NULL + }; + + args[0] = tmpath; + snprintf(lbuf, sizeof(lbuf), "%d", maxlocks); + args[2] = lbuf; + snprintf(nbuf, sizeof(nbuf), "%d", nlocks); + args[4] = nbuf; + snprintf(pbuf, sizeof(pbuf), "%d", nprocs); + args[6] = pbuf; + snprintf(tbuf, sizeof(tbuf), "%d", nthreads); + args[8] = tbuf; + snprintf(Tbuf, sizeof(Tbuf), "%s=%lu", typearg, id); + args[10] = Tbuf; + args[11] = vbuf; + + return (os_spawn(tmpath, args)); +} + +os_pid_t +os_spawn(path, argv) + const char *path; + char *const argv[]; +{ + os_pid_t pid; + int status; + + COMPQUIET(pid, 0); + COMPQUIET(status, 0); + +#ifdef HAVE_VXWORKS + fprintf(stderr, "ERROR: os_spawn not supported for VxWorks.\n"); + return (OS_BAD_PID); +#elif defined(HAVE_QNX) + /* + * For QNX, we cannot fork if we've ever used threads. So + * we'll use their spawn function. We use 'spawnl' which + * is NOT a POSIX function. + * + * The return value of spawnl is just what we want depending + * on the value of the 'wait' arg. + */ + return (spawnv(P_NOWAIT, path, argv)); +#elif defined(DB_WIN32) + return (os_pid_t)(_spawnv(P_NOWAIT, path, argv)); +#else + if ((pid = fork()) != 0) { + if (pid == -1) + return (OS_BAD_PID); + return (pid); + } else { + execv(path, argv); + exit(EXIT_FAILURE); + } +#endif +} diff --git a/db/mutex/uts4_cc.s b/db/mutex/uts4_cc.s index 9b314c4af..a8f2498ab 100644 --- a/db/mutex/uts4_cc.s +++ b/db/mutex/uts4_cc.s @@ -1,9 +1,9 @@ / See the file LICENSE for redistribution information. / - / Copyright (c) 1997-2004 - / Sleepycat Software. All rights reserved. + / Copyright (c) 1997-2006 + / Oracle Corporation. All rights reserved. / - / $Id: uts4_cc.s,v 11.4 2004/01/28 03:36:18 bostic Exp $ + / $Id: uts4_cc.s,v 12.3 2006/08/24 14:46:16 bostic Exp $ / / int uts_lock ( int *p, int i ); / Update the lock word pointed to by p with the |