author     jbj <devnull@localhost>    2003-12-16 03:41:35 +0000
committer  jbj <devnull@localhost>    2003-12-16 03:41:35 +0000
commit     60207c0346637a32b792e371436eeb731665409e (patch)
tree       2025b5b3b0b525c674913ea8a267e4fd8c6e466c
parent     8960e3895f7af91126465368dff8fbb36ab4e853 (diff)
- build with db-4.2.52 internal.
- refresh bsddb.

CVS patchset: 6973
CVS date: 2003/12/16 03:41:35
-rw-r--r--   CHANGES                                2
-rwxr-xr-x   autogen.sh                             8
-rwxr-xr-x   beecrypt/autogen.sh                    8
-rwxr-xr-x   expat/autogen.sh                      14
-rwxr-xr-x   file/autogen.sh                        8
-rw-r--r--   python/_rpmdb.c                      356
-rw-r--r--   python/rpmdb/__init__.py             142
-rw-r--r--   python/rpmdb/db.py                     4
-rw-r--r--   python/rpmdb/dbobj.py                 10
-rw-r--r--   python/rpmdb/dbtables.py               5
-rw-r--r--   python/rpmdb/test/test_associate.py    2
-rw-r--r--   python/rpmdb/test/test_basics.py     135
-rw-r--r--   python/rpmdb/test/test_compat.py       2
-rw-r--r--   python/rpmdb/test/test_dbtables.py     2
-rw-r--r--   python/rpmdb/test/test_join.py       106
-rw-r--r--   python/rpmdb/test/test_thread.py       4
-rw-r--r--   rpmdb/Makefile.am                      2
17 files changed, 654 insertions, 156 deletions
diff --git a/CHANGES b/CHANGES
index 1b2e06be5..af7db6d3f 100644
--- a/CHANGES
+++ b/CHANGES
@@ -7,6 +7,8 @@
- don't use error string after gzclose (Dmitry V. Levin).
- only internal Berkeley db from now on.
- revive "make dist".
+ - build with db-4.2.52 internal.
+ - refresh bsddb.
4.2 -> 4.2.1:
- fix: nested %if handling, optind initialization posix vs. glibc.
diff --git a/autogen.sh b/autogen.sh
index 50331a7b2..88065ab96 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -4,13 +4,13 @@ export CFLAGS
export LDFLAGS
LTV="libtoolize (GNU libtool) 1.5"
-ACV="autoconf (GNU Autoconf) 2.57"
-AMV="automake (GNU automake) 1.7.9"
+ACV="autoconf (GNU Autoconf) 2.59"
+AMV="automake (GNU automake) 1.8"
USAGE="
This script documents the versions of the tools I'm using to build rpm:
libtool-1.5
- autoconf-2.57
- automake-1.7.9
+ autoconf-2.59
+ automake-1.8
Simply edit this script to change the libtool/autoconf/automake versions
checked if you need to, as rpm should build (and has built) with all
recent versions of libtool/autoconf/automake.
diff --git a/beecrypt/autogen.sh b/beecrypt/autogen.sh
index 02d297ffd..b633dc372 100755
--- a/beecrypt/autogen.sh
+++ b/beecrypt/autogen.sh
@@ -4,13 +4,13 @@ export CFLAGS
export LDFLAGS
LTV="libtoolize (GNU libtool) 1.5"
-ACV="autoconf (GNU Autoconf) 2.57"
-AMV="automake (GNU automake) 1.7.9"
+ACV="autoconf (GNU Autoconf) 2.59"
+AMV="automake (GNU automake) 1.8"
USAGE="
This script documents the versions of the tools I'm using to build rpm:
libtool-1.5
- autoconf-2.57
- automake-1.7.9
+ autoconf-2.59
+ automake-1.8
Simply edit this script to change the libtool/autoconf/automake versions
checked if you need to, as rpm should build (and has built) with all
recent versions of libtool/autoconf/automake.
diff --git a/expat/autogen.sh b/expat/autogen.sh
index f3852397a..e495d6907 100755
--- a/expat/autogen.sh
+++ b/expat/autogen.sh
@@ -3,20 +3,20 @@
export CFLAGS
export LDFLAGS
-LTV="libtoolize (GNU libtool) 1.4.3"
-ACV="autoconf (GNU Autoconf) 2.57"
-AMV="automake (GNU automake) 1.7.9"
+LTV="libtoolize (GNU libtool) 1.5"
+ACV="autoconf (GNU Autoconf) 2.59"
+AMV="automake (GNU automake) 1.8"
USAGE="
This script documents the versions of the tools I'm using to build rpm:
- libtool-1.4.3
- autoconf-2.57
- automake-1.7.9
+ libtool-1.5
+ autoconf-2.59
+ automake-1.8
Simply edit this script to change the libtool/autoconf/automake versions
checked if you need to, as rpm should build (and has built) with all
recent versions of libtool/autoconf/automake.
"
-[ "`libtoolize --version`" != "$LTV" ] && echo "$USAGE" && exit 1
+[ "`libtoolize --version | head -1`" != "$LTV" ] && echo "$USAGE" && exit 1
[ "`autoconf --version | head -1`" != "$ACV" ] && echo "$USAGE" && exit 1
[ "`automake --version | head -1 | sed -e 's/1\.4[a-z]/1.4/'`" != "$AMV" ] && echo "$USAGE" && exit 1
diff --git a/file/autogen.sh b/file/autogen.sh
index cfabd77e4..e495d6907 100755
--- a/file/autogen.sh
+++ b/file/autogen.sh
@@ -4,13 +4,13 @@ export CFLAGS
export LDFLAGS
LTV="libtoolize (GNU libtool) 1.5"
-ACV="autoconf (GNU Autoconf) 2.57"
-AMV="automake (GNU automake) 1.7.9"
+ACV="autoconf (GNU Autoconf) 2.59"
+AMV="automake (GNU automake) 1.8"
USAGE="
This script documents the versions of the tools I'm using to build rpm:
libtool-1.5
- autoconf-2.57
- automake-1.7.9
+ autoconf-2.59
+ automake-1.8
Simply edit this script to change the libtool/autoconf/automake versions
checked if you need to, as rpm should build (and has built) with all
recent versions of libtool/autoconf/automake.
diff --git a/python/_rpmdb.c b/python/_rpmdb.c
index 659485400..d65bb7ac0 100644
--- a/python/_rpmdb.c
+++ b/python/_rpmdb.c
@@ -40,7 +40,7 @@
/*
* Handwritten code to wrap version 3.x of the Berkeley DB library,
* written to replace a SWIG-generated file. It has since been updated
- * to compile with BerkeleyDB versions 3.2 through 4.1.
+ * to compile with BerkeleyDB versions 3.2 through 4.2.
*
* This module was started by Andrew Kuchling to remove the dependency
* on SWIG in a package by Gregory P. Smith <greg@electricrain.com> who
@@ -88,6 +88,7 @@
/* --------------------------------------------------------------------- */
+#include <stddef.h> /* for offsetof() */
#include <Python.h>
#include <db.h>
@@ -96,9 +97,12 @@
/* 40 = 4.0, 33 = 3.3; this will break if the second number is > 9 */
#define DBVER (DB_VERSION_MAJOR * 10 + DB_VERSION_MINOR)
+#if DB_VERSION_MINOR > 9
+#error "eek! DBVER can't handle minor versions > 9"
+#endif
-#define PY_BSDDB_VERSION "4.1.5"
-static char *rcs_id = "Id: _bsddb.c,v 1.13 2003/04/24 14:28:08 bwarsaw Exp ";
+#define PY_BSDDB_VERSION "4.2.4"
+static char *rcs_id = "$Id: _rpmdb.c,v 1.11 2003/12/16 03:41:35 jbj Exp $";
#ifdef WITH_THREAD
@@ -145,12 +149,6 @@ static PyInterpreterState* _db_interpreterState = NULL;
#endif
-
-/* What is the default behaviour when DB->get or DBCursor->get returns a
- DB_NOTFOUND error? Return None or raise an exception? */
-#define GET_RETURNS_NONE_DEFAULT 1
-
-
/* Should DB_INCOMPLETE be turned into a warning or an exception? */
#define INCOMPLETE_IS_WARNING 1
@@ -158,6 +156,7 @@ static PyInterpreterState* _db_interpreterState = NULL;
/* Exceptions */
static PyObject* DBError; /* Base class, all others derive from this */
+static PyObject* DBCursorClosedError; /* raised when trying to use a closed cursor object */
static PyObject* DBKeyEmptyError; /* DB_KEYEMPTY */
static PyObject* DBKeyExistError; /* DB_KEYEXIST */
static PyObject* DBLockDeadlockError; /* DB_LOCK_DEADLOCK */
@@ -193,12 +192,30 @@ static PyObject* DBPermissionsError; /* EPERM */
/* --------------------------------------------------------------------- */
/* Structure definitions */
+#if PYTHON_API_VERSION >= 1010 /* python >= 2.1 support weak references */
+#define HAVE_WEAKREF
+#else
+#undef HAVE_WEAKREF
+#endif
+
+struct behaviourFlags {
+ /* What is the default behaviour when DB->get or DBCursor->get returns a
+ DB_NOTFOUND error? Return None or raise an exception? */
+ unsigned int getReturnsNone : 1;
+ /* What is the default behaviour for DBCursor.set* methods when DBCursor->get
+ * returns a DB_NOTFOUND error? Return None or raise an exception? */
+ unsigned int cursorSetReturnsNone : 1;
+};
+
+#define DEFAULT_GET_RETURNS_NONE 1
+#define DEFAULT_CURSOR_SET_RETURNS_NONE 1 /* 0 in pybsddb < 4.2, python < 2.4 */
+
typedef struct {
PyObject_HEAD
DB_ENV* db_env;
u_int32_t flags; /* saved flags from open() */
int closed;
- int getReturnsNone;
+ struct behaviourFlags moduleFlags;
} DBEnvObject;
@@ -209,7 +226,7 @@ typedef struct {
u_int32_t flags; /* saved flags from open() */
u_int32_t setflags; /* saved flags from set_flags() */
int haveStat;
- int getReturnsNone;
+ struct behaviourFlags moduleFlags;
#if (DBVER >= 33)
PyObject* associateCallback;
int primaryDBType;
@@ -221,6 +238,9 @@ typedef struct {
PyObject_HEAD
DBC* dbc;
DBObject* mydb;
+#ifdef HAVE_WEAKREF
+ PyObject *in_weakreflist; /* List of weak references */
+#endif
} DBCursorObject;
@@ -256,27 +276,23 @@ staticforward PyTypeObject DB_Type, DBCursor_Type, DBEnv_Type, DBTxn_Type, DBLoc
#define RETURN_NONE() Py_INCREF(Py_None); return Py_None;
-#define CHECK_DB_NOT_CLOSED(dbobj) \
- if (dbobj->db == NULL) { \
- PyErr_SetObject(DBError, Py_BuildValue("(is)", 0, \
- "DB object has been closed")); \
- return NULL; \
+#define _CHECK_OBJECT_NOT_CLOSED(nonNull, pyErrObj, name) \
+ if ((nonNull) == NULL) { \
+ PyObject *errTuple = NULL; \
+ errTuple = Py_BuildValue("(is)", 0, #name " object has been closed"); \
+ PyErr_SetObject((pyErrObj), errTuple); \
+ Py_DECREF(errTuple); \
+ return NULL; \
}
-#define CHECK_ENV_NOT_CLOSED(env) \
- if (env->db_env == NULL) { \
- PyErr_SetObject(DBError, Py_BuildValue("(is)", 0, \
- "DBEnv object has been closed"));\
- return NULL; \
- }
+#define CHECK_DB_NOT_CLOSED(dbobj) \
+ _CHECK_OBJECT_NOT_CLOSED(dbobj->db, DBError, DB)
-#define CHECK_CURSOR_NOT_CLOSED(curs) \
- if (curs->dbc == NULL) { \
- PyErr_SetObject(DBError, Py_BuildValue("(is)", 0, \
- "DBCursor object has been closed"));\
- return NULL; \
- }
+#define CHECK_ENV_NOT_CLOSED(env) \
+ _CHECK_OBJECT_NOT_CLOSED(env->db_env, DBError, DBEnv)
+#define CHECK_CURSOR_NOT_CLOSED(curs) \
+ _CHECK_OBJECT_NOT_CLOSED(curs->dbc, DBCursorClosedError, DBCursor)
#define CHECK_DBFLAG(mydb, flag) (((mydb)->flags & (flag)) || \
@@ -579,7 +595,7 @@ static PyObject* _DBCursor_get(DBCursorObject* self, int extra_flags,
char* kwnames[] = { "flags", "dlen", "doff", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, format, kwnames,
- &flags, &dlen, &doff))
+ &flags, &dlen, &doff))
return NULL;
CHECK_CURSOR_NOT_CLOSED(self);
@@ -599,7 +615,7 @@ static PyObject* _DBCursor_get(DBCursorObject* self, int extra_flags,
err = self->dbc->c_get(self->dbc, &key, &data, flags);
MYDB_END_ALLOW_THREADS;
- if ((err == DB_NOTFOUND) && self->mydb->getReturnsNone) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.getReturnsNone) {
Py_INCREF(Py_None);
retval = Py_None;
}
@@ -685,9 +701,10 @@ newDBObject(DBEnvObject* arg, int flags)
}
if (self->myenvobj)
- self->getReturnsNone = self->myenvobj->getReturnsNone;
+ self->moduleFlags = self->myenvobj->moduleFlags;
else
- self->getReturnsNone = GET_RETURNS_NONE_DEFAULT;
+ self->moduleFlags.getReturnsNone = DEFAULT_GET_RETURNS_NONE;
+ self->moduleFlags.cursorSetReturnsNone = DEFAULT_CURSOR_SET_RETURNS_NONE;
MYDB_BEGIN_ALLOW_THREADS;
err = db_create(&self->db, db_env, flags);
@@ -760,6 +777,9 @@ newDBCursorObject(DBC* dbc, DBObject* db)
self->dbc = dbc;
self->mydb = db;
+#ifdef HAVE_WEAKREF
+ self->in_weakreflist = NULL;
+#endif
Py_INCREF(self->mydb);
return self;
}
@@ -769,9 +789,23 @@ static void
DBCursor_dealloc(DBCursorObject* self)
{
int err;
+
+#ifdef HAVE_WEAKREF
+ if (self->in_weakreflist != NULL) {
+ PyObject_ClearWeakRefs((PyObject *) self);
+ }
+#endif
+
if (self->dbc != NULL) {
MYDB_BEGIN_ALLOW_THREADS;
- if (self->mydb->db != NULL)
+ /* If the underlying database has been closed, we don't
+ need to do anything. If the environment has been closed
+ we need to leak, as BerkeleyDB will crash trying to access
+ the environment. There was an exception when the
+ user closed the environment even though there still was
+ a database open. */
+ if (self->mydb->db && self->mydb->myenvobj &&
+ !self->mydb->myenvobj->closed)
err = self->dbc->c_close(self->dbc);
self->dbc = NULL;
MYDB_END_ALLOW_THREADS;
@@ -801,7 +835,8 @@ newDBEnvObject(int flags)
self->closed = 1;
self->flags = flags;
- self->getReturnsNone = GET_RETURNS_NONE_DEFAULT;
+ self->moduleFlags.getReturnsNone = DEFAULT_GET_RETURNS_NONE;
+ self->moduleFlags.cursorSetReturnsNone = DEFAULT_CURSOR_SET_RETURNS_NONE;
MYDB_BEGIN_ALLOW_THREADS;
err = db_env_create(&self->db_env, flags);
@@ -1139,9 +1174,7 @@ DB_close(DBObject* self, PyObject* args)
if (self->db != NULL) {
if (self->myenvobj)
CHECK_ENV_NOT_CLOSED(self->myenvobj);
- MYDB_BEGIN_ALLOW_THREADS;
err = self->db->close(self->db, flags);
- MYDB_END_ALLOW_THREADS;
self->db = NULL;
RETURN_IF_ERR();
}
@@ -1188,7 +1221,7 @@ _DB_consume(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
err = self->db->get(self->db, txn, &key, &data, flags|consume_flag);
MYDB_END_ALLOW_THREADS;
- if ((err == DB_NOTFOUND) && self->getReturnsNone) {
+ if ((err == DB_NOTFOUND) && self->moduleFlags.getReturnsNone) {
err = 0;
Py_INCREF(Py_None);
retval = Py_None;
@@ -1330,7 +1363,7 @@ DB_get(DBObject* self, PyObject* args, PyObject* kwargs)
Py_INCREF(dfltobj);
retval = dfltobj;
}
- else if ((err == DB_NOTFOUND) && self->getReturnsNone) {
+ else if ((err == DB_NOTFOUND) && self->moduleFlags.getReturnsNone) {
err = 0;
Py_INCREF(Py_None);
retval = Py_None;
@@ -1430,7 +1463,7 @@ DB_get_both(DBObject* self, PyObject* args, PyObject* kwargs)
err = self->db->get(self->db, txn, &key, &data, flags);
MYDB_END_ALLOW_THREADS;
- if ((err == DB_NOTFOUND) && self->getReturnsNone) {
+ if ((err == DB_NOTFOUND) && self->moduleFlags.getReturnsNone) {
err = 0;
Py_INCREF(Py_None);
retval = Py_None;
@@ -1531,6 +1564,11 @@ DB_join(DBObject* self, PyObject* args)
free(cursors);
RETURN_IF_ERR();
+ /* FIXME: this is a buggy interface. The returned cursor
+ contains internal references to the passed in cursors
+ but does not hold python references to them or prevent
+ them from being closed prematurely. This can cause
+ python to crash when things are done in the wrong order. */
return (PyObject*) newDBCursorObject(dbc, self);
}
@@ -1634,7 +1672,7 @@ DB_open(DBObject* self, PyObject* args, PyObject* kwargs)
* explicitly passed) but we are in a transaction ready environment:
* add DB_AUTO_COMMIT to allow for older pybsddb apps using transactions
* to work on BerkeleyDB 4.1 without needing to modify their
- * DBEnv or DB open calls.
+ * DBEnv or DB open calls.
* TODO make this behaviour of the library configurable.
*/
flags |= DB_AUTO_COMMIT;
@@ -1711,9 +1749,8 @@ DB_remove(DBObject* self, PyObject* args, PyObject* kwargs)
return NULL;
CHECK_DB_NOT_CLOSED(self);
- MYDB_BEGIN_ALLOW_THREADS;
err = self->db->remove(self->db, filename, database, flags);
- MYDB_END_ALLOW_THREADS;
+ self->db = NULL;
RETURN_IF_ERR();
RETURN_NONE();
}
@@ -2154,6 +2191,17 @@ DB_verify(DBObject* self, PyObject* args, PyObject* kwargs)
MYDB_END_ALLOW_THREADS;
if (outFileName)
fclose(outFile);
+
+ /* DB.verify acts as a DB handle destructor (like close); this was
+ * documented in BerkeleyDB 4.2 but had the undocumented effect
+ * of not being safe in prior versions while still requiring an explicit
+ * DB.close call afterwards. Lets call close for the user to emulate
+ * the safe 4.2 behaviour. */
+#if (DBVER <= 41)
+ self->db->close(self->db, 0);
+#endif
+ self->db = NULL;
+
RETURN_IF_ERR();
RETURN_NONE();
}
@@ -2163,14 +2211,18 @@ static PyObject*
DB_set_get_returns_none(DBObject* self, PyObject* args)
{
int flags=0;
- int oldValue;
+ int oldValue=0;
if (!PyArg_ParseTuple(args,"i:set_get_returns_none", &flags))
return NULL;
CHECK_DB_NOT_CLOSED(self);
- oldValue = self->getReturnsNone;
- self->getReturnsNone = flags;
+ if (self->moduleFlags.getReturnsNone)
+ ++oldValue;
+ if (self->moduleFlags.cursorSetReturnsNone)
+ ++oldValue;
+ self->moduleFlags.getReturnsNone = (flags >= 1);
+ self->moduleFlags.cursorSetReturnsNone = (flags >= 2);
return PyInt_FromLong(oldValue);
}
@@ -2201,7 +2253,7 @@ DB_set_encrypt(DBObject* self, PyObject* args, PyObject* kwargs)
/*-------------------------------------------------------------- */
/* Mapping and Dictionary-like access routines */
-static int DB_length(DBObject* self)
+int DB_length(DBObject* self)
{
int err;
long size = 0;
@@ -2240,7 +2292,7 @@ static int DB_length(DBObject* self)
}
-static PyObject* DB_subscript(DBObject* self, PyObject* keyobj)
+PyObject* DB_subscript(DBObject* self, PyObject* keyobj)
{
int err;
PyObject* retval;
@@ -2618,7 +2670,7 @@ DBC_get(DBCursorObject* self, PyObject* args, PyObject *kwargs)
{
PyErr_Clear();
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi|ii:get",
- &kwnames[1],
+ &kwnames[1],
&keyobj, &flags, &dlen, &doff))
{
PyErr_Clear();
@@ -2650,7 +2702,7 @@ DBC_get(DBCursorObject* self, PyObject* args, PyObject *kwargs)
MYDB_END_ALLOW_THREADS;
- if ((err == DB_NOTFOUND) && self->mydb->getReturnsNone) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.getReturnsNone) {
Py_INCREF(Py_None);
retval = Py_None;
}
@@ -2797,7 +2849,11 @@ DBC_set(DBCursorObject* self, PyObject* args, PyObject *kwargs)
MYDB_BEGIN_ALLOW_THREADS;
err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET);
MYDB_END_ALLOW_THREADS;
- if (makeDBError(err)) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.cursorSetReturnsNone) {
+ Py_INCREF(Py_None);
+ retval = Py_None;
+ }
+ else if (makeDBError(err)) {
retval = NULL;
}
else {
@@ -2855,7 +2911,11 @@ DBC_set_range(DBCursorObject* self, PyObject* args, PyObject* kwargs)
MYDB_BEGIN_ALLOW_THREADS;
err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET_RANGE);
MYDB_END_ALLOW_THREADS;
- if (makeDBError(err)) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.cursorSetReturnsNone) {
+ Py_INCREF(Py_None);
+ retval = Py_None;
+ }
+ else if (makeDBError(err)) {
retval = NULL;
}
else {
@@ -2882,19 +2942,15 @@ DBC_set_range(DBCursorObject* self, PyObject* args, PyObject* kwargs)
return retval;
}
-
static PyObject*
-DBC_get_both(DBCursorObject* self, PyObject* args)
+_DBC_get_set_both(DBCursorObject* self, PyObject* keyobj, PyObject* dataobj,
+ int flags, unsigned int returnsNone)
{
- int err, flags=0;
+ int err;
DBT key, data;
- PyObject* retval, *keyobj, *dataobj;
-
- if (!PyArg_ParseTuple(args, "OO|i:get_both", &keyobj, &dataobj, &flags))
- return NULL;
-
- CHECK_CURSOR_NOT_CLOSED(self);
+ PyObject* retval;
+ /* the caller did this: CHECK_CURSOR_NOT_CLOSED(self); */
if (!make_key_dbt(self->mydb, keyobj, &key, NULL))
return NULL;
if (!make_dbt(dataobj, &data))
@@ -2903,7 +2959,11 @@ DBC_get_both(DBCursorObject* self, PyObject* args)
MYDB_BEGIN_ALLOW_THREADS;
err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_GET_BOTH);
MYDB_END_ALLOW_THREADS;
- if (makeDBError(err)) {
+ if ((err == DB_NOTFOUND) && returnsNone) {
+ Py_INCREF(Py_None);
+ retval = Py_None;
+ }
+ else if (makeDBError(err)) {
retval = NULL;
}
else {
@@ -2929,6 +2989,71 @@ DBC_get_both(DBCursorObject* self, PyObject* args)
return retval;
}
+static PyObject*
+DBC_get_both(DBCursorObject* self, PyObject* args)
+{
+ int flags=0;
+ PyObject *keyobj, *dataobj;
+
+ if (!PyArg_ParseTuple(args, "OO|i:get_both", &keyobj, &dataobj, &flags))
+ return NULL;
+
+ /* if the cursor is closed, self->mydb may be invalid */
+ CHECK_CURSOR_NOT_CLOSED(self);
+
+ return _DBC_get_set_both(self, keyobj, dataobj, flags,
+ self->mydb->moduleFlags.getReturnsNone);
+}
+
+/* Return size of entry */
+static PyObject*
+DBC_get_current_size(DBCursorObject* self, PyObject* args)
+{
+ int err, flags=DB_CURRENT;
+ PyObject* retval = NULL;
+ DBT key, data;
+
+ if (!PyArg_ParseTuple(args, ":get_current_size"))
+ return NULL;
+ CHECK_CURSOR_NOT_CLOSED(self);
+ CLEAR_DBT(key);
+ CLEAR_DBT(data);
+
+ /* We don't allocate any memory, forcing a ENOMEM error and thus
+ getting the record size. */
+ data.flags = DB_DBT_USERMEM;
+ data.ulen = 0;
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->dbc->c_get(self->dbc, &key, &data, flags);
+ MYDB_END_ALLOW_THREADS;
+ if (err == ENOMEM || !err) {
+ /* ENOMEM means positive size, !err means zero length value */
+ retval = PyInt_FromLong((long)data.size);
+ err = 0;
+ }
+
+ FREE_DBT(key);
+ FREE_DBT(data);
+ RETURN_IF_ERR();
+ return retval;
+}
+
+static PyObject*
+DBC_set_both(DBCursorObject* self, PyObject* args)
+{
+ int flags=0;
+ PyObject *keyobj, *dataobj;
+
+ if (!PyArg_ParseTuple(args, "OO|i:set_both", &keyobj, &dataobj, &flags))
+ return NULL;
+
+ /* if the cursor is closed, self->mydb may be invalid */
+ CHECK_CURSOR_NOT_CLOSED(self);
+
+ return _DBC_get_set_both(self, keyobj, dataobj, flags,
+ self->mydb->moduleFlags.cursorSetReturnsNone);
+}
+
static PyObject*
DBC_set_recno(DBCursorObject* self, PyObject* args, PyObject *kwargs)
@@ -2972,7 +3097,11 @@ DBC_set_recno(DBCursorObject* self, PyObject* args, PyObject *kwargs)
MYDB_BEGIN_ALLOW_THREADS;
err = self->dbc->c_get(self->dbc, &key, &data, flags|DB_SET_RECNO);
MYDB_END_ALLOW_THREADS;
- if (makeDBError(err)) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.cursorSetReturnsNone) {
+ Py_INCREF(Py_None);
+ retval = Py_None;
+ }
+ else if (makeDBError(err)) {
retval = NULL;
}
else { /* Can only be used for BTrees, so no need to return int key */
@@ -3017,11 +3146,11 @@ DBC_prev_nodup(DBCursorObject* self, PyObject* args, PyObject *kwargs)
static PyObject*
DBC_join_item(DBCursorObject* self, PyObject* args)
{
- int err;
+ int err, flags=0;
DBT key, data;
PyObject* retval;
- if (!PyArg_ParseTuple(args, ":join_item"))
+ if (!PyArg_ParseTuple(args, "|i:join_item", &flags))
return NULL;
CHECK_CURSOR_NOT_CLOSED(self);
@@ -3034,13 +3163,17 @@ DBC_join_item(DBCursorObject* self, PyObject* args)
}
MYDB_BEGIN_ALLOW_THREADS;
- err = self->dbc->c_get(self->dbc, &key, &data, DB_JOIN_ITEM);
+ err = self->dbc->c_get(self->dbc, &key, &data, flags | DB_JOIN_ITEM);
MYDB_END_ALLOW_THREADS;
- if (makeDBError(err)) {
+ if ((err == DB_NOTFOUND) && self->mydb->moduleFlags.getReturnsNone) {
+ Py_INCREF(Py_None);
+ retval = Py_None;
+ }
+ else if (makeDBError(err)) {
retval = NULL;
}
else {
- retval = Py_BuildValue("s#s#", key.data, key.size);
+ retval = Py_BuildValue("s#", key.data, key.size);
FREE_DBT(key);
}
@@ -3212,6 +3345,21 @@ DBEnv_set_timeout(DBEnvObject* self, PyObject* args, PyObject* kwargs)
#endif /* DBVER >= 40 */
static PyObject*
+DBEnv_set_shm_key(DBEnvObject* self, PyObject* args)
+{
+ int err;
+ long shm_key = 0;
+
+ if (!PyArg_ParseTuple(args, "l:set_shm_key", &shm_key))
+ return NULL;
+ CHECK_ENV_NOT_CLOSED(self);
+
+ err = self->db_env->set_shm_key(self->db_env, shm_key);
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+
+static PyObject*
DBEnv_set_cachesize(DBEnvObject* self, PyObject* args)
{
int err, gbytes=0, bytes=0, ncache=0;
@@ -3755,14 +3903,18 @@ static PyObject*
DBEnv_set_get_returns_none(DBEnvObject* self, PyObject* args)
{
int flags=0;
- int oldValue;
+ int oldValue=0;
if (!PyArg_ParseTuple(args,"i:set_get_returns_none", &flags))
return NULL;
CHECK_ENV_NOT_CLOSED(self);
- oldValue = self->getReturnsNone;
- self->getReturnsNone = flags;
+ if (self->moduleFlags.getReturnsNone)
+ ++oldValue;
+ if (self->moduleFlags.cursorSetReturnsNone)
+ ++oldValue;
+ self->moduleFlags.getReturnsNone = (flags >= 1);
+ self->moduleFlags.cursorSetReturnsNone = (flags >= 2);
return PyInt_FromLong(oldValue);
}
@@ -3984,7 +4136,8 @@ static PyMethodDef DBCursor_methods[] = {
{"set", (PyCFunction)DBC_set, METH_VARARGS|METH_KEYWORDS},
{"set_range", (PyCFunction)DBC_set_range, METH_VARARGS|METH_KEYWORDS},
{"get_both", (PyCFunction)DBC_get_both, METH_VARARGS},
- {"set_both", (PyCFunction)DBC_get_both, METH_VARARGS},
+ {"get_current_size",(PyCFunction)DBC_get_current_size, METH_VARARGS},
+ {"set_both", (PyCFunction)DBC_set_both, METH_VARARGS},
{"set_recno", (PyCFunction)DBC_set_recno, METH_VARARGS|METH_KEYWORDS},
{"consume", (PyCFunction)DBC_consume, METH_VARARGS|METH_KEYWORDS},
{"next_dup", (PyCFunction)DBC_next_dup, METH_VARARGS|METH_KEYWORDS},
@@ -4007,6 +4160,7 @@ static PyMethodDef DBEnv_methods[] = {
#if (DBVER >= 40)
{"set_timeout", (PyCFunction)DBEnv_set_timeout, METH_VARARGS|METH_KEYWORDS},
#endif
+ {"set_shm_key", (PyCFunction)DBEnv_set_shm_key, METH_VARARGS},
{"set_cachesize", (PyCFunction)DBEnv_set_cachesize, METH_VARARGS},
{"set_data_dir", (PyCFunction)DBEnv_set_data_dir, METH_VARARGS},
#if (DBVER >= 32)
@@ -4125,6 +4279,19 @@ statichere PyTypeObject DBCursor_Type = {
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
+#ifdef HAVE_WEAKREF
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ offsetof(DBCursorObject, in_weakreflist), /* tp_weaklistoffset */
+#endif
};
@@ -4255,10 +4422,10 @@ static PyMethodDef bsddb_methods[] = {
*/
#define ADD_INT(dict, NAME) _addIntToDict(dict, #NAME, NAME)
+#define MODULE_NAME_MAX_LEN 11
+static char _bsddbModuleName[MODULE_NAME_MAX_LEN+1] = "_bsddb";
-
-void init_rpmdb(void); /* XXX eliminate gcc warning */
-DL_EXPORT(void) init_rpmdb(void)
+DL_EXPORT(void) init_bsddb(void)
{
PyObject* m;
PyObject* d;
@@ -4281,7 +4448,7 @@ DL_EXPORT(void) init_rpmdb(void)
#endif
/* Create the module and add the functions */
- m = Py_InitModule("_rpmdb", bsddb_methods);
+ m = Py_InitModule(_bsddbModuleName, bsddb_methods);
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
@@ -4302,7 +4469,13 @@ DL_EXPORT(void) init_rpmdb(void)
ADD_INT(d, DB_MAX_PAGES);
ADD_INT(d, DB_MAX_RECORDS);
+#if (DBVER >= 42)
+ ADD_INT(d, DB_RPCCLIENT);
+#else
ADD_INT(d, DB_CLIENT);
+ /* allow apps to be written using DB_RPCCLIENT on older BerkeleyDB */
+ _addIntToDict(d, "DB_RPCCLIENT", DB_CLIENT);
+#endif
ADD_INT(d, DB_XA_CREATE);
ADD_INT(d, DB_CREATE);
@@ -4451,7 +4624,7 @@ DL_EXPORT(void) init_rpmdb(void)
ADD_INT(d, DB_CHECKPOINT);
ADD_INT(d, DB_CURLSN);
#endif
-#if (DBVER >= 33)
+#if ((DBVER >= 33) && (DBVER <= 41))
ADD_INT(d, DB_COMMIT);
#endif
ADD_INT(d, DB_CONSUME);
@@ -4526,6 +4699,18 @@ DL_EXPORT(void) init_rpmdb(void)
ADD_INT(d, DB_NOPANIC);
#endif
+#if (DBVER >= 42)
+ ADD_INT(d, DB_TIME_NOTGRANTED);
+ ADD_INT(d, DB_TXN_NOT_DURABLE);
+ ADD_INT(d, DB_TXN_WRITE_NOSYNC);
+ ADD_INT(d, DB_LOG_AUTOREMOVE);
+ ADD_INT(d, DB_DIRECT_LOG);
+ ADD_INT(d, DB_DIRECT_DB);
+ ADD_INT(d, DB_INIT_REP);
+ ADD_INT(d, DB_ENCRYPT);
+ ADD_INT(d, DB_CHKSUM);
+#endif
+
#if (DBVER >= 41)
ADD_INT(d, DB_ENCRYPT_AES);
ADD_INT(d, DB_AUTO_COMMIT);
@@ -4569,6 +4754,7 @@ DL_EXPORT(void) init_rpmdb(void)
#if !INCOMPLETE_IS_WARNING
MAKE_EX(DBIncompleteError);
#endif
+ MAKE_EX(DBCursorClosedError);
MAKE_EX(DBKeyEmptyError);
MAKE_EX(DBKeyExistError);
MAKE_EX(DBLockDeadlockError);
@@ -4599,6 +4785,22 @@ DL_EXPORT(void) init_rpmdb(void)
/* Check for errors */
if (PyErr_Occurred()) {
PyErr_Print();
- Py_FatalError("can't initialize module _rpmdb");
+ Py_FatalError("can't initialize module _bsddb");
}
}
+
+/* allow this module to be named _pybsddb so that it can be installed
+ * and imported on top of python >= 2.3 that includes its own older
+ * copy of the library named _bsddb without importing the old version. */
+DL_EXPORT(void) init_pybsddb(void)
+{
+ strncpy(_bsddbModuleName, "_pybsddb", MODULE_NAME_MAX_LEN);
+ init_bsddb();
+}
+
+/* allow this module to be named _rpmdb too. */
+DL_EXPORT(void) init_rpmdb(void)
+{
+ strncpy(_bsddbModuleName, "_rpmdb", MODULE_NAME_MAX_LEN);
+ init_bsddb();
+}
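
The main API-visible changes in the refreshed _rpmdb.c above are the graded set_get_returns_none() flag (0 = DB_NOTFOUND always raises, 1 = DB/cursor get() returns None, 2 = the DBCursor.set* methods also return None) and the new DBCursor.get_current_size() method. A minimal usage sketch, illustrative only and not part of the patch, assuming the module builds and imports as "rpmdb":

    # Sketch only: exercises the behaviour added above.
    from rpmdb import db

    d = db.DB()
    d.open('example.db', db.DB_HASH, db.DB_CREATE, 0666)
    d.put('spam', 'eggs')

    d.set_get_returns_none(2)    # get() and cursor.set*() now return None on DB_NOTFOUND
    c = d.cursor()
    print c.set('missing key')   # None instead of raising DBNotFoundError
    print c.set('spam')          # ('spam', 'eggs')
    print c.get_current_size()   # 4 -- size of the current record's value
    c.close()
    d.close()
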
diff --git a/python/rpmdb/__init__.py b/python/rpmdb/__init__.py
index 64eb5a97c..860d0153b 100644
--- a/python/rpmdb/__init__.py
+++ b/python/rpmdb/__init__.py
@@ -33,7 +33,7 @@
#----------------------------------------------------------------------
-"""Support for BerkeleyDB 3.1 through 4.1.
+"""Support for BerkeleyDB 3.2 through 4.2.
"""
try:
@@ -52,23 +52,143 @@ error = db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
+import sys
+
+# for backwards compatibility with python versions older than 2.3, the
+# iterator interface is dynamically defined and added using a mixin
+# class. old python can't tokenize it due to the yield keyword.
+if sys.version >= '2.3':
+ exec """
+import UserDict
+from weakref import ref
+class _iter_mixin(UserDict.DictMixin):
+ def _make_iter_cursor(self):
+ cur = self.db.cursor()
+ key = id(cur)
+ self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
+ return cur
+
+ def _gen_cref_cleaner(self, key):
+ # use generate the function for the weakref callback here
+ # to ensure that we do not hold a strict reference to cur
+ # in the callback.
+ return lambda ref: self._cursor_refs.pop(key, None)
+
+ def __iter__(self):
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ # since we're only returning keys, we call the cursor
+ # methods with flags=0, dlen=0, dofs=0
+ key = cur.first(0,0,0)[0]
+ yield key
+
+ next = cur.next
+ while 1:
+ try:
+ key = next(0,0,0)[0]
+ yield key
+ except _bsddb.DBCursorClosedError:
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ cur.set(key,0,0,0)
+ next = cur.next
+ except _bsddb.DBNotFoundError:
+ return
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ return
+
+ def iteritems(self):
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ kv = cur.first()
+ key = kv[0]
+ yield kv
+
+ next = cur.next
+ while 1:
+ try:
+ kv = next()
+ key = kv[0]
+ yield kv
+ except _bsddb.DBCursorClosedError:
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ cur.set(key,0,0,0)
+ next = cur.next
+ except _bsddb.DBNotFoundError:
+ return
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ return
+"""
+else:
+ class _iter_mixin: pass
+
-class _DBWithCursor:
+class _DBWithCursor(_iter_mixin):
"""
A simple wrapper around DB that makes it look like the bsddbobject in
the old module. It uses a cursor as needed to provide DB traversal.
"""
def __init__(self, db):
self.db = db
- self.dbc = None
self.db.set_get_returns_none(0)
+ # FIXME-20031101-greg: I believe there is still the potential
+ # for deadlocks in a multithreaded environment if someone
+ # attempts to use the any of the cursor interfaces in one
+ # thread while doing a put or delete in another thread. The
+ # reason is that _checkCursor and _closeCursors are not atomic
+ # operations. Doing our own locking around self.dbc,
+ # self.saved_dbc_key and self._cursor_refs could prevent this.
+ # TODO: A test case demonstrating the problem needs to be written.
+
+ # self.dbc is a DBCursor object used to implement the
+ # first/next/previous/last/set_location methods.
+ self.dbc = None
+ self.saved_dbc_key = None
+
+ # a collection of all DBCursor objects currently allocated
+ # by the _iter_mixin interface.
+ self._cursor_refs = {}
+
def __del__(self):
self.close()
def _checkCursor(self):
if self.dbc is None:
self.dbc = self.db.cursor()
+ if self.saved_dbc_key is not None:
+ self.dbc.set(self.saved_dbc_key)
+ self.saved_dbc_key = None
+
+ # This method is needed for all non-cursor DB calls to avoid
+ # BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
+ # and DB_THREAD to be thread safe) when intermixing database
+ # operations that use the cursor internally with those that don't.
+ def _closeCursors(self, save=True):
+ if self.dbc:
+ c = self.dbc
+ self.dbc = None
+ if save:
+ self.saved_dbc_key = c.current(0,0,0)[0]
+ c.close()
+ del c
+ for cref in self._cursor_refs.values():
+ c = cref()
+ if c is not None:
+ c.close()
def _checkOpen(self):
if self.db is None:
@@ -87,13 +207,16 @@ class _DBWithCursor:
def __setitem__(self, key, value):
self._checkOpen()
+ self._closeCursors()
self.db[key] = value
def __delitem__(self, key):
self._checkOpen()
+ self._closeCursors()
del self.db[key]
def close(self):
+ self._closeCursors(save=False)
if self.dbc is not None:
self.dbc.close()
v = 0
@@ -152,7 +275,8 @@ def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
cachesize=None, lorder=None, hflags=0):
flags = _checkflag(flag)
- d = db.DB()
+ e = _openDBEnv()
+ d = db.DB(e)
d.set_flags(hflags)
if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
@@ -169,7 +293,8 @@ def btopen(file, flag='c', mode=0666,
pgsize=None, lorder=None):
flags = _checkflag(flag)
- d = db.DB()
+ e = _openDBEnv()
+ d = db.DB(e)
if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
@@ -187,7 +312,8 @@ def rnopen(file, flag='c', mode=0666,
rlen=None, delim=None, source=None, pad=None):
flags = _checkflag(flag)
- d = db.DB()
+ e = _openDBEnv()
+ d = db.DB(e)
if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
@@ -201,6 +327,10 @@ def rnopen(file, flag='c', mode=0666,
#----------------------------------------------------------------------
+def _openDBEnv():
+ e = db.DBEnv()
+ e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
+ return e
def _checkflag(flag):
if flag == 'r':
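
The simple-interface functions (hashopen, btopen, rnopen) now create each DB inside a private environment via the new _openDBEnv(), giving them a memory pool and locking. Roughly the equivalent open sequence, as a sketch only (not part of the patch):

    # Sketch: what hashopen() now does under the hood.
    from rpmdb import db

    e = db.DBEnv()
    e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD
                | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
    d = db.DB(e)                 # the DB handle is created inside the env
    d.open('example.db', db.DB_HASH, db.DB_CREATE, 0666)
    d.put('key', 'value')
    d.close()
    e.close()
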
diff --git a/python/rpmdb/db.py b/python/rpmdb/db.py
index 3bf5f8e35..5ac0fa05f 100644
--- a/python/rpmdb/db.py
+++ b/python/rpmdb/db.py
@@ -40,5 +40,5 @@
from _rpmdb import *
from _rpmdb import __version__
-if version() < (3, 1, 0):
- raise ImportError, "BerkeleyDB 3.x symbols not found. Perhaps python was statically linked with an older version?"
+if version() < (3, 2, 0):
+ raise ImportError, "correct BerkeleyDB symbols not found. Perhaps python was statically linked with an older version?"
diff --git a/python/rpmdb/dbobj.py b/python/rpmdb/dbobj.py
index b2632a1a0..abda657ba 100644
--- a/python/rpmdb/dbobj.py
+++ b/python/rpmdb/dbobj.py
@@ -15,6 +15,12 @@
# implied.
#
+#
+# TODO it would be *really nice* to have an automatic shadow class populator
+# so that new methods don't need to be added here manually after being
+# added to _bsddb.c.
+#
+
import db
try:
@@ -33,6 +39,8 @@ class DBEnv:
return apply(self._cobj.open, args, kwargs)
def remove(self, *args, **kwargs):
return apply(self._cobj.remove, args, kwargs)
+ def set_shm_key(self, *args, **kwargs):
+ return apply(self._cobj.set_shm_key, args, kwargs)
def set_cachesize(self, *args, **kwargs):
return apply(self._cobj.set_cachesize, args, kwargs)
def set_data_dir(self, *args, **kwargs):
@@ -57,6 +65,8 @@ class DBEnv:
return apply(self._cobj.set_lk_max_objects, args, kwargs)
def set_mp_mmapsize(self, *args, **kwargs):
return apply(self._cobj.set_mp_mmapsize, args, kwargs)
+ def set_timeout(self, *args, **kwargs):
+ return apply(self._cobj.set_timeout, args, kwargs)
def set_tmp_dir(self, *args, **kwargs):
return apply(self._cobj.set_tmp_dir, args, kwargs)
def txn_begin(self, *args, **kwargs):
diff --git a/python/rpmdb/dbtables.py b/python/rpmdb/dbtables.py
index 8947c5434..e5be5f115 100644
--- a/python/rpmdb/dbtables.py
+++ b/python/rpmdb/dbtables.py
@@ -15,7 +15,7 @@
# This provides a simple database table interface built on top of
# the Python BerkeleyDB 3 interface.
#
-_cvsid = 'Id: dbtables.py,v 1.7 2003/01/28 17:20:42 bwarsaw Exp '
+_cvsid = 'Id: dbtables.py,v 1.9 2003/09/21 00:08:14 greg Exp '
import re
import sys
@@ -150,6 +150,9 @@ class bsdTableDB :
if truncate:
myflags |= DB_TRUNCATE
self.db = DB(self.env)
+ # this code relies on DBCursor.set* methods to raise exceptions
+ # rather than returning None
+ self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(DB_DUP)
self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
diff --git a/python/rpmdb/test/test_associate.py b/python/rpmdb/test/test_associate.py
index be6ef610d..1dbae37da 100644
--- a/python/rpmdb/test/test_associate.py
+++ b/python/rpmdb/test/test_associate.py
@@ -1,5 +1,5 @@
"""
-TestCases for multi-threaded access to a DB.
+TestCases for DB.associate.
"""
import sys, os, string
diff --git a/python/rpmdb/test/test_basics.py b/python/rpmdb/test/test_basics.py
index e35002b3f..ba629e517 100644
--- a/python/rpmdb/test/test_basics.py
+++ b/python/rpmdb/test/test_basics.py
@@ -44,6 +44,8 @@ class BasicTestCase(unittest.TestCase):
envflags = 0
envsetflags = 0
+ _numKeys = 1002 # PRIVATE. NOTE: must be an even value
+
def setUp(self):
if self.useEnv:
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
@@ -101,17 +103,23 @@ class BasicTestCase(unittest.TestCase):
- def populateDB(self):
+ def populateDB(self, _txn=None):
d = self.d
- for x in range(500):
- key = '%04d' % (1000 - x) # insert keys in reverse order
+
+ for x in range(self._numKeys/2):
+ key = '%04d' % (self._numKeys - x) # insert keys in reverse order
data = self.makeData(key)
- d.put(key, data)
+ d.put(key, data, _txn)
- for x in range(500):
+ d.put('empty value', '', _txn)
+
+ for x in range(self._numKeys/2-1):
key = '%04d' % x # and now some in forward order
data = self.makeData(key)
- d.put(key, data)
+ d.put(key, data, _txn)
+
+ if _txn:
+ _txn.commit()
num = len(d)
if verbose:
@@ -231,20 +239,20 @@ class BasicTestCase(unittest.TestCase):
if verbose:
print data
- assert len(d) == 1000
+ assert len(d) == self._numKeys
keys = d.keys()
- assert len(keys) == 1000
+ assert len(keys) == self._numKeys
assert type(keys) == type([])
d['new record'] = 'a new record'
- assert len(d) == 1001
+ assert len(d) == self._numKeys+1
keys = d.keys()
- assert len(keys) == 1001
+ assert len(keys) == self._numKeys+1
d['new record'] = 'a replacement record'
- assert len(d) == 1001
+ assert len(d) == self._numKeys+1
keys = d.keys()
- assert len(keys) == 1001
+ assert len(keys) == self._numKeys+1
if verbose:
print "the first 10 keys are:"
@@ -256,7 +264,7 @@ class BasicTestCase(unittest.TestCase):
assert d.has_key('spam') == 0
items = d.items()
- assert len(items) == 1001
+ assert len(items) == self._numKeys+1
assert type(items) == type([])
assert type(items[0]) == type(())
assert len(items[0]) == 2
@@ -266,7 +274,7 @@ class BasicTestCase(unittest.TestCase):
pprint(items[:10])
values = d.values()
- assert len(values) == 1001
+ assert len(values) == self._numKeys+1
assert type(values) == type([])
if verbose:
@@ -277,27 +285,36 @@ class BasicTestCase(unittest.TestCase):
#----------------------------------------
- def test03_SimpleCursorStuff(self):
+ def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=1):
if verbose:
print '\n', '-=' * 30
- print "Running %s.test03_SimpleCursorStuff..." % \
- self.__class__.__name__
+ print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
+ (self.__class__.__name__, get_raises_error, set_raises_error)
if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
txn = self.env.txn_begin()
else:
txn = None
c = self.d.cursor(txn=txn)
-
+
rec = c.first()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
- rec = c.next()
-
- assert count == 1000
+ try:
+ rec = c.next()
+ except db.DBNotFoundError, val:
+ if get_raises_error:
+ assert val[0] == db.DB_NOTFOUND
+ if verbose: print val
+ rec = None
+ else:
+ self.fail("unexpected DBNotFoundError")
+ assert c.get_current_size() == len(c.current()[1]), "%s != len(%r)" % (c.get_current_size(), c.current()[1])
+
+ assert count == self._numKeys
rec = c.last()
@@ -306,34 +323,54 @@ class BasicTestCase(unittest.TestCase):
count = count + 1
if verbose and count % 100 == 0:
print rec
- rec = c.prev()
+ try:
+ rec = c.prev()
+ except db.DBNotFoundError, val:
+ if get_raises_error:
+ assert val[0] == db.DB_NOTFOUND
+ if verbose: print val
+ rec = None
+ else:
+ self.fail("unexpected DBNotFoundError")
- assert count == 1000
+ assert count == self._numKeys
rec = c.set('0505')
rec2 = c.current()
assert rec == rec2
assert rec[0] == '0505'
assert rec[1] == self.makeData('0505')
+ assert c.get_current_size() == len(rec[1])
+ # make sure we get empty values properly
+ rec = c.set('empty value')
+ assert rec[1] == ''
+ assert c.get_current_size() == 0
+
try:
- c.set('bad key')
+ n = c.set('bad key')
except db.DBNotFoundError, val:
assert val[0] == db.DB_NOTFOUND
if verbose: print val
else:
- self.fail("expected exception")
+ if set_raises_error:
+ self.fail("expected exception")
+ if n != None:
+ self.fail("expected None: "+`n`)
rec = c.get_both('0404', self.makeData('0404'))
assert rec == ('0404', self.makeData('0404'))
try:
- c.get_both('0404', 'bad data')
+ n = c.get_both('0404', 'bad data')
except db.DBNotFoundError, val:
assert val[0] == db.DB_NOTFOUND
if verbose: print val
else:
- self.fail("expected exception")
+ if get_raises_error:
+ self.fail("expected exception")
+ if n != None:
+ self.fail("expected None: "+`n`)
if self.d.get_type() == db.DB_BTREE:
rec = c.set_range('011')
@@ -409,6 +446,29 @@ class BasicTestCase(unittest.TestCase):
# SF pybsddb bug id 667343
del oldcursor
+ def test03b_SimpleCursorWithoutGetReturnsNone0(self):
+ # same test but raise exceptions instead of returning None
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
+ self.__class__.__name__
+
+ old = self.d.set_get_returns_none(0)
+ assert old == 1
+ self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)
+
+ def test03c_SimpleCursorGetReturnsNone2(self):
+ # same test but raise exceptions instead of returning None
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test03c_SimpleCursorStuffWithoutSetReturnsNone..." % \
+ self.__class__.__name__
+
+ old = self.d.set_get_returns_none(2)
+ assert old == 1
+ old = self.d.set_get_returns_none(2)
+ assert old == 2
+ self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)
#----------------------------------------
@@ -525,23 +585,8 @@ class BasicTransactionTestCase(BasicTestCase):
def populateDB(self):
- d = self.d
txn = self.env.txn_begin()
- for x in range(500):
- key = '%04d' % (1000 - x) # insert keys in reverse order
- data = self.makeData(key)
- d.put(key, data, txn)
-
- for x in range(500):
- key = '%04d' % x # and now some in forward order
- data = self.makeData(key)
- d.put(key, data, txn)
-
- txn.commit()
-
- num = len(d)
- if verbose:
- print "created %d records" % num
+ BasicTestCase.populateDB(self, _txn=txn)
self.txn = self.env.txn_begin()
@@ -576,7 +621,7 @@ class BasicTransactionTestCase(BasicTestCase):
if verbose and count % 100 == 0:
print rec
rec = c.next()
- assert count == 1001
+ assert count == self._numKeys+1
c.close() # Cursors *MUST* be closed before commit!
self.txn.commit()
@@ -805,7 +850,7 @@ class BasicMultiDBTestCase(BasicTestCase):
if verbose and (count % 50) == 0:
print rec
rec = c1.next()
- assert count == 1000
+ assert count == self._numKeys
count = 0
rec = c2.first()
diff --git a/python/rpmdb/test/test_compat.py b/python/rpmdb/test/test_compat.py
index 041081468..55acc8bcc 100644
--- a/python/rpmdb/test/test_compat.py
+++ b/python/rpmdb/test/test_compat.py
@@ -4,7 +4,6 @@ regression test suite.
"""
import sys, os, string
-import rpmdb
import unittest
import tempfile
@@ -12,6 +11,7 @@ from test_all import verbose
from rpmdb import db, hashopen, btopen, rnopen
+
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
diff --git a/python/rpmdb/test/test_dbtables.py b/python/rpmdb/test/test_dbtables.py
index b90906515..dc4f7f027 100644
--- a/python/rpmdb/test/test_dbtables.py
+++ b/python/rpmdb/test/test_dbtables.py
@@ -18,7 +18,7 @@
#
# -- Gregory P. Smith <greg@electricrain.com>
#
-# $Id: test_dbtables.py,v 1.1 2003/05/05 21:42:55 jbj Exp $
+# Id: test_dbtables.py,v 1.6 2003/09/21 00:08:14 greg Exp
import sys, os, re
try:
diff --git a/python/rpmdb/test/test_join.py b/python/rpmdb/test/test_join.py
index ab75ba196..9a46a891c 100644
--- a/python/rpmdb/test/test_join.py
+++ b/python/rpmdb/test/test_join.py
@@ -1,9 +1,115 @@
"""TestCases for using the DB.join and DBCursor.join_item methods.
"""
+import sys, os, string
+import tempfile
+import time
+from pprint import pprint
+
+try:
+ from threading import Thread, currentThread
+ have_threads = 1
+except ImportError:
+ have_threads = 0
+
import unittest
+from test_all import verbose
+
+from rpmdb import db, dbshelve
+
+
+#----------------------------------------------------------------------
+
+ProductIndex = [
+ ('apple', "Convenience Store"),
+ ('blueberry', "Farmer's Market"),
+ ('shotgun', "S-Mart"), # Aisle 12
+ ('pear', "Farmer's Market"),
+ ('chainsaw', "S-Mart"), # "Shop smart. Shop S-Mart!"
+ ('strawberry', "Farmer's Market"),
+]
+
+ColorIndex = [
+ ('blue', "blueberry"),
+ ('red', "apple"),
+ ('red', "chainsaw"),
+ ('red', "strawberry"),
+ ('yellow', "peach"),
+ ('yellow', "pear"),
+ ('black', "shotgun"),
+]
+
+class JoinTestCase(unittest.TestCase):
+ keytype = ''
+
+ def setUp(self):
+ self.filename = self.__class__.__name__ + '.db'
+ homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
+ self.homeDir = homeDir
+ try: os.mkdir(homeDir)
+ except os.error: pass
+ self.env = db.DBEnv()
+ self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
+
+ def tearDown(self):
+ self.env.close()
+ import glob
+ files = glob.glob(os.path.join(self.homeDir, '*'))
+ for file in files:
+ os.remove(file)
+
+ def test01_join(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test01_join..." % \
+ self.__class__.__name__
+
+ # create and populate primary index
+ priDB = db.DB(self.env)
+ priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
+ map(lambda t, priDB=priDB: apply(priDB.put, t), ProductIndex)
+
+ # create and populate secondary index
+ secDB = db.DB(self.env)
+ secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
+ secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
+ map(lambda t, secDB=secDB: apply(secDB.put, t), ColorIndex)
+
+ sCursor = None
+ jCursor = None
+ try:
+ # lets look up all of the red Products
+ sCursor = secDB.cursor()
+ # Don't do the .set() in an assert, or you can get a bogus failure
+ # when running python -O
+ tmp = sCursor.set('red')
+ assert tmp
+
+ # FIXME: jCursor doesn't properly hold a reference to its
+ # cursors, if they are closed before jcursor is used it
+ # can cause a crash.
+ jCursor = priDB.join([sCursor])
+
+ if jCursor.get(0) != ('apple', "Convenience Store"):
+ self.fail("join cursor positioned wrong")
+ if jCursor.join_item() != 'chainsaw':
+ self.fail("DBCursor.join_item returned wrong item")
+ if jCursor.get(0)[0] != 'strawberry':
+ self.fail("join cursor returned wrong thing")
+ if jCursor.get(0): # there were only three red items to return
+ self.fail("join cursor returned too many items")
+ finally:
+ if jCursor:
+ jCursor.close()
+ if sCursor:
+ sCursor.close()
+ priDB.close()
+ secDB.close()
def test_suite():
suite = unittest.TestSuite()
+
+ suite.addTest(unittest.makeSuite(JoinTestCase))
+
return suite
diff --git a/python/rpmdb/test/test_thread.py b/python/rpmdb/test/test_thread.py
index 3041557cc..4e7f9f01d 100644
--- a/python/rpmdb/test/test_thread.py
+++ b/python/rpmdb/test/test_thread.py
@@ -262,12 +262,12 @@ class SimpleThreadedBase(BaseThreadedTestCase):
for loop in range(5):
c = d.cursor()
count = 0
- rec = c.first()
+ rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
- rec = c.next()
+ rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
diff --git a/rpmdb/Makefile.am b/rpmdb/Makefile.am
index 9c4199149..e3f5c808b 100644
--- a/rpmdb/Makefile.am
+++ b/rpmdb/Makefile.am
@@ -82,7 +82,7 @@ db.h:
# XXX grrr, force noinst libdb.la for db3.
$(libdb_la):
- sed -e"/^libdir=/s/^.*$$/libdir=''/" < $(top_builddir)/$(WITH_DB_SUBDIR)/libdb-4.1.la > $(libdb_la)
+ sed -e"/^libdir=/s/^.*$$/libdir=''/" < $(top_builddir)/$(WITH_DB_SUBDIR)/libdb-4.2.la > $(libdb_la)
rpmdb_archive_SOURCES =
rpmdb_archive_LDADD = \