Diffstat (limited to 'db/txn/txn_util.c')
-rw-r--r--  db/txn/txn_util.c  234
1 file changed, 234 insertions, 0 deletions
diff --git a/db/txn/txn_util.c b/db/txn/txn_util.c
new file mode 100644
index 000000000..8656d2ece
--- /dev/null
+++ b/db/txn/txn_util.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: txn_util.c,v 11.18 2002/08/06 06:25:12 bostic Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+typedef struct __txn_event TXN_EVENT;
+struct __txn_event {
+	TXN_EVENT_T op;
+	TAILQ_ENTRY(__txn_event) links;
+	union {
+		struct {
+			/* Delayed remove. */
+			char *name;
+			u_int8_t *fileid;
+		} r;
+		struct {
+			/* Lock event. */
+			DB_LOCK lock;
+			u_int32_t locker;
+			DB *dbp;
+		} t;
+	} u;
+};
+
+/*
+ * __txn_remevent --
+ *
+ * Creates a remove event that can be added to the commit list.
+ *
+ * PUBLIC: int __txn_remevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int8_t*));
+ */
+int
+__txn_remevent(dbenv, txn, name, fileid)
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	const char *name;
+	u_int8_t *fileid;
+{
+	int ret;
+	TXN_EVENT *e;
+
+	e = NULL;
+	if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+		return (ret);
+
+	if ((ret = __os_strdup(dbenv, name, &e->u.r.name)) != 0)
+		goto err;
+
+	if (fileid != NULL) {
+		if ((ret = __os_calloc(dbenv,
+		    1, DB_FILE_ID_LEN, &e->u.r.fileid)) != 0)
+			goto err;
+		memcpy(e->u.r.fileid, fileid, DB_FILE_ID_LEN);
+	}
+
+	e->op = TXN_REMOVE;
+	TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+	return (0);
+
+err:	if (e != NULL) {
+		if (e->u.r.name != NULL)
+			__os_free(dbenv, e->u.r.name);
+		__os_free(dbenv, e);
+	}
+
+	return (ret);
+}
+
+/*
+ * __txn_lockevent --
+ *
+ * Add a lock event to the commit queue.  The lock event indicates a
+ * locker trade.
+ *
+ * PUBLIC: int __txn_lockevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+ */
+int
+__txn_lockevent(dbenv, txn, dbp, lock, locker)
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	DB *dbp;
+	DB_LOCK *lock;
+	u_int32_t locker;
+{
+	int ret;
+	TXN_EVENT *e;
+
+	if (!LOCKING_ON(dbenv))
+		return (0);
+
+	e = NULL;
+	if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+		return (ret);
+
+	e->u.t.locker = locker;
+	e->u.t.lock = *lock;
+	e->u.t.dbp = dbp;
+	e->op = TXN_TRADE;
+	TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+	return (0);
+}
+
+/*
+ * __txn_remlock --
+ * Remove a lock event because the locker is going away. We can remove
+ * by lock (using offset) or by locker_id (or by both).
+ *
+ * PUBLIC: void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+ */
+void
+__txn_remlock(dbenv, txn, lock, locker)
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	DB_LOCK *lock;
+	u_int32_t locker;
+{
+	TXN_EVENT *e, *next_e;
+
+	for (e = TAILQ_FIRST(&txn->events); e != NULL; e = next_e) {
+		next_e = TAILQ_NEXT(e, links);
+		if ((e->op != TXN_TRADE && e->op != TXN_TRADED) ||
+		    (e->u.t.lock.off != lock->off && e->u.t.locker != locker))
+			continue;
+		TAILQ_REMOVE(&txn->events, e, links);
+		__os_free(dbenv, e);
+	}
+
+	return;
+}
+
+/*
+ * __txn_doevents --
+ * Process the list of events associated with a transaction. On commit,
+ * apply the events; on abort, just toss the entries.
+ *
+ * PUBLIC: int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+ */
+#define DO_TRADE do { \
+	memset(&req, 0, sizeof(req)); \
+	req.lock = e->u.t.lock; \
+	req.op = DB_LOCK_TRADE; \
+	t_ret = __lock_vec(dbenv, e->u.t.locker, 0, &req, 1, NULL); \
+	if (t_ret == 0) \
+		e->u.t.dbp->cur_lid = e->u.t.locker; \
+	else if (t_ret == DB_NOTFOUND) \
+		t_ret = 0; \
+	if (t_ret != 0 && ret == 0) \
+		ret = t_ret; \
+	e->op = TXN_TRADED; \
+} while (0)
+
+int
+__txn_doevents(dbenv, txn, is_commit, preprocess)
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	int is_commit, preprocess;
+{
+	DB_LOCKREQ req;
+	TXN_EVENT *e;
+	int ret, t_ret;
+
+	ret = 0;
+
+	/*
+	 * This pass is only made on paths that have a phase in which we
+	 * release read locks.  Since not all paths make this pass, we
+	 * have to check for untraded events below as well.  So, when we
+	 * do the trade, we update the opcode of the entry so that we
+	 * don't try the trade again.
+	 */
+	if (preprocess) {
+		for (e = TAILQ_FIRST(&txn->events);
+		    e != NULL; e = TAILQ_NEXT(e, links)) {
+			if (e->op != TXN_TRADE)
+				continue;
+			DO_TRADE;
+		}
+		return (ret);
+	}
+
+	while ((e = TAILQ_FIRST(&txn->events)) != NULL) {
+		TAILQ_REMOVE(&txn->events, e, links);
+		if (!is_commit)
+			goto dofree;
+		switch (e->op) {
+		case TXN_REMOVE:
+			if (e->u.r.fileid != NULL) {
+				if ((t_ret = dbenv->memp_nameop(dbenv,
+				    e->u.r.fileid,
+				    NULL, e->u.r.name, NULL)) != 0 && ret == 0)
+					ret = t_ret;
+				__os_free(dbenv, e->u.r.fileid);
+			} else if ((t_ret =
+			    __os_unlink(dbenv, e->u.r.name)) != 0 && ret == 0)
+				ret = t_ret;
+			__os_free(dbenv, e->u.r.name);
+			break;
+		case TXN_TRADE:
+			DO_TRADE;
+			/* Fall through */
+		case TXN_TRADED:
+			/* Downgrade the lock. */
+			if ((t_ret = __lock_downgrade(dbenv,
+			    &e->u.t.lock, DB_LOCK_READ, 0)) != 0 && ret == 0)
+				ret = t_ret;
+			break;
+		default:
+			/* This had better never happen. */
+			DB_ASSERT(0);
+		}
+dofree:		__os_free(dbenv, e);
+	}
+
+	return (ret);
+}
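
The file above is easier to follow once the underlying pattern is recognized: work that must not become visible unless the transaction commits (file removals, lock trades) is queued as events on a TAILQ hanging off the DB_TXN, and __txn_doevents either applies the queue at commit or discards it on abort. The sketch below is a minimal, self-contained illustration of that deferred-event idea in plain C. It is not Berkeley DB code: the struct and function names (event, txn_remevent, txn_doevents) and the use of remove(3) in place of __os_unlink/memp_nameop are assumptions made for the example, and the lock-trade half of the real code is omitted.

/*
 * Minimal sketch of a commit-time event queue (illustrative only;
 * names and types here are not Berkeley DB's).  Events are queued
 * while the "transaction" runs and are applied only if it commits.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event {
	char *path;			/* File to remove at commit. */
	TAILQ_ENTRY(event) links;
};

struct txn {
	TAILQ_HEAD(, event) events;	/* Deferred commit-time work. */
};

/* Queue a deferred remove; nothing touches the file system yet. */
static int
txn_remevent(struct txn *txn, const char *path)
{
	struct event *e;

	if ((e = calloc(1, sizeof(*e))) == NULL)
		return (-1);
	if ((e->path = strdup(path)) == NULL) {
		free(e);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&txn->events, e, links);
	return (0);
}

/* On commit, apply every queued event; on abort, just discard them. */
static int
txn_doevents(struct txn *txn, int is_commit)
{
	struct event *e;
	int ret;

	ret = 0;
	while ((e = TAILQ_FIRST(&txn->events)) != NULL) {
		TAILQ_REMOVE(&txn->events, e, links);
		if (is_commit && remove(e->path) != 0 && ret == 0)
			ret = -1;	/* Remember the first failure. */
		free(e->path);
		free(e);
	}
	return (ret);
}

int
main(void)
{
	struct txn txn;

	TAILQ_INIT(&txn.events);
	(void)txn_remevent(&txn, "obsolete.db");	/* Deferred, not removed yet. */
	return (txn_doevents(&txn, 1 /* commit */) == 0 ? 0 : 1);
}

The same "remember the first failure but keep draining the queue" convention appears throughout __txn_doevents via its ret/t_ret pair.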