author		Ulrich Drepper <drepper@redhat.com>	1998-06-09 15:16:55 +0000
committer	Ulrich Drepper <drepper@redhat.com>	1998-06-09 15:16:55 +0000
commit		bf7997b65c7887d2acda95f5201d818a19d81711 (patch)
tree		da3583de3a0b5892f90a4b1eb773a87b554ae37e /db2/lock
parent		7646e67e6cc4c738a7b402c60fed39d52db0433b (diff)
Update.
1998-06-09 Ulrich Drepper <drepper@cygnus.com>
* sysdeps/unix/sysv/linux/netinet/ip.h (struct ip_options): Define
__data member only for gcc. Reported by ak@muc.de.
* misc/mntent.h: Undo last patch.
* sysdeps/unix/sysv/linux/fstatvfs.c (fstatvfs): Undo last patch.
* misc/tst-mntent.c: Adjust code for this change.
* io/fts.c: Updated from a slightly more recent BSD version.
* io/fts.h: Likewise.
* libc.map: Add __libc_stack_end.
* db2/Makefile (routines): Add lock_region.
* db2/config.h: Update from db-2.4.14.
* db2/db.h: Likewise.
* db2/db_185.h: Likewise.
* db2/db_int.h: Likewise.
* db2/bt_close.c: Likewise.
* db2/bt_compare.c: Likewise.
* db2/bt_conv.c: Likewise.
* db2/bt_cursor.c: Likewise.
* db2/bt_delete.c: Likewise.
* db2/bt_open.c: Likewise.
* db2/bt_page.c: Likewise.
* db2/bt_put.c: Likewise.
* db2/bt_rec.c: Likewise.
* db2/bt_recno.c: Likewise.
* db2/bt_rsearch.c: Likewise.
* db2/bt_search.c: Likewise.
* db2/bt_split.c: Likewise.
* db2/bt_stat.c: Likewise.
* db2/btree.src: Likewise.
* db2/btree_auto.c: Likewise.
* db2/getlong.c: Likewise.
* db2/db_appinit.c: Likewise.
* db2/db_apprec.c: Likewise.
* db2/db_byteorder.c: Likewise.
* db2/db_err.c: Likewise.
* db2/db_log2.c: Likewise.
* db2/db_region.c: Likewise.
* db2/db_salloc.c: Likewise.
* db2/db_shash.c: Likewise.
* db2/db.c: Likewise.
* db2/db.src: Likewise.
* db2/db_auto.c: Likewise.
* db2/db_conv.c: Likewise.
* db2/db_dispatch.c: Likewise.
* db2/db_dup.c: Likewise.
* db2/db_overflow.c: Likewise.
* db2/db_pr.c: Likewise.
* db2/db_rec.c: Likewise.
* db2/db_ret.c: Likewise.
* db2/db_thread.c: Likewise.
* db2/db185.c: Likewise.
* db2/db185_int.h: Likewise.
* db2/dbm.c: Likewise.
* db2/hash.c: Likewise.
* db2/hash.src: Likewise.
* db2/hash_auto.c: Likewise.
* db2/hash_conv.c: Likewise.
* db2/hash_debug.c: Likewise.
* db2/hash_dup.c: Likewise.
* db2/hash_func.c: Likewise.
* db2/hash_page.c: Likewise.
* db2/hash_rec.c: Likewise.
* db2/hash_stat.c: Likewise.
* db2/btree.h: Likewise.
* db2/btree_ext.h: Likewise.
* db2/clib_ext.h: Likewise.
* db2/common_ext.h: Likewise.
* db2/cxx_int.h: Likewise.
* db2/db.h.src: Likewise.
* db2/db_185.h.src: Likewise.
* db2/db_am.h: Likewise.
* db2/db_auto.h: Likewise.
* db2/db_cxx.h: Likewise.
* db2/db_dispatch.h: Likewise.
* db2/db_ext.h: Likewise.
* db2/db_int.h.src: Likewise.
* db2/db_page.h: Likewise.
* db2/db_shash.h: Likewise.
* db2/db_swap.h: Likewise.
* db2/hash.h: Likewise.
* db2/hash_ext.h: Likewise.
* db2/lock.h: Likewise.
* db2/lock_ext.h: Likewise.
* db2/log.h: Likewise.
* db2/log_ext.h: Likewise.
* db2/mp.h: Likewise.
* db2/mp_ext.h: Likewise.
* db2/mutex_ext.h: Likewise.
* db2/os_ext.h: Likewise.
* db2/os_func.h: Likewise.
* db2/queue.h: Likewise.
* db2/shqueue.h: Likewise.
* db2/txn.h: Likewise.
* db2/lock.c: Likewise.
* db2/lock_conflict.c: Likewise.
* db2/lock_deadlock.c: Likewise.
* db2/lock_region.c: Likewise.
* db2/lock_util.c: Likewise.
* db2/log.c: Likewise.
* db2/log.src: Likewise.
* db2/log_archive.c: Likewise.
* db2/log_auto.c: Likewise.
* db2/log_compare.c: Likewise.
* db2/log_findckp.c: Likewise.
* db2/log_get.c: Likewise.
* db2/log_put.c: Likewise.
* db2/log_rec.c: Likewise.
* db2/log_register.c: Likewise.
* db2/mp_bh.c: Likewise.
* db2/mp_fget.c: Likewise.
* db2/mp_fopen.c: Likewise.
* db2/mp_fput.c: Likewise.
* db2/mp_fset.c: Likewise.
* db2/mp_open.c: Likewise.
* db2/mp_pr.c: Likewise.
* db2/mp_region.c: Likewise.
* db2/mp_sync.c: Likewise.
* db2/68020.gcc: Likewise.
* db2/mutex.c: Likewise.
* db2/parisc.gcc: Likewise.
* db2/parisc.hp: Likewise.
* db2/sco.cc: Likewise.
* db2/os_abs.c: Likewise.
* db2/os_alloc.c: Likewise.
* db2/os_config.c: Likewise.
* db2/os_dir.c: Likewise.
* db2/os_fid.c: Likewise.
* db2/os_fsync.c: Likewise.
* db2/os_map.c: Likewise.
* db2/os_oflags.c: Likewise.
* db2/os_open.c: Likewise.
* db2/os_rpath.c: Likewise.
* db2/os_rw.c: Likewise.
* db2/os_seek.c: Likewise.
* db2/os_sleep.c: Likewise.
* db2/os_spin.c: Likewise.
* db2/os_stat.c: Likewise.
* db2/os_unlink.c: Likewise.
* db2/db_archive.c: Likewise.
* db2/db_checkpoint.c: Likewise.
* db2/db_deadlock.c: Likewise.
* db2/db_dump.c: Likewise.
* db2/db_dump185.c: Likewise.
* db2/db_load.c: Likewise.
* db2/db_printlog.c: Likewise.
* db2/db_recover.c: Likewise.
* db2/db_stat.c: Likewise.
* db2/txn.c: Likewise.
* db2/txn.src: Likewise.
* db2/txn_auto.c: Likewise.
* db2/txn_rec.c: Likewise.
* elf/rtld.c: Move definition of __libc_stack_end to ...
* sysdeps/generic/dl-sysdep.h: ...here.
* sysdeps/unix/sysv/linux/fstatvfs.c: Handle nodiratime option.
* sysdeps/unix/sysv/linux/bits/statvfs.h: Define ST_NODIRATIME.
* sysdeps/unix/sysv/linux/sys/mount.h: Define MS_NODIRATIME.
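[Editorial aside, not part of the commit message: the last three entries make fstatvfs map the nodiratime mount option to a new ST_NODIRATIME bit in statvfs.f_flag, mirroring the kernel's MS_NODIRATIME. A minimal sketch of how a caller might test it, assuming a glibc whose <sys/statvfs.h> defines ST_NODIRATIME as this patch does:

#include <stdio.h>
#include <sys/statvfs.h>

int
main(void)
{
	struct statvfs sv;

	/* Query the filesystem the root directory lives on. */
	if (statvfs("/", &sv) != 0) {
		perror("statvfs");
		return 1;
	}
	if (sv.f_flag & ST_NODIRATIME)
		printf("mounted with nodiratime\n");
	return 0;
}
]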
1998-06-08 21:44 Ulrich Drepper <drepper@cygnus.com>
* sysdeps/unix/sysv/linux/fstatvfs.c: Handle constant option string
from mntent correctly.
1998-06-06 Andreas Jaeger <aj@arthur.rhein-neckar.de>
* sunrpc/Makefile (generated): Correct typo.
1998-06-04 Philip Blundell <philb@gnu.org>
* elf/elf.h (EM_ARM, et al.): New definitions.
* sysdeps/arm/dl-machine.h: Update for new draft ARM ELF ABI.
Diffstat (limited to 'db2/lock')
-rw-r--r--	db2/lock/lock.c	666
-rw-r--r--	db2/lock/lock_conflict.c	4
-rw-r--r--	db2/lock/lock_deadlock.c	30
-rw-r--r--	db2/lock/lock_region.c	726
-rw-r--r--	db2/lock/lock_util.c	93
5 files changed, 840 insertions, 679 deletions
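[Editorial orientation before the diff below, not part of the commit: after this db-2.4.14 import, the lock manager's public entry points take u_int32_t flags and locker ids, and lock_open/lock_close/lock_unlink move into the new lock_region.c, which joins the shared region through the generic REGINFO support. A hedged usage sketch of the API as it stands after the patch; the region path and the wrapper function are invented, and error handling is abbreviated:

#include <string.h>
#include "db.h"

int
use_lock_table(DB_ENV *dbenv)
{
	DB_LOCKTAB *lt;
	DB_LOCK lock;
	DBT obj;
	u_int32_t locker;
	int ret;

	/* Join or create the lock region ("lockenv" is a made-up path). */
	if ((ret = lock_open("lockenv", DB_CREATE, 0664, dbenv, &lt)) != 0)
		return (ret);
	if ((ret = lock_id(lt, &locker)) != 0)
		goto out;

	memset(&obj, 0, sizeof(obj));
	obj.data = "object-name";
	obj.size = sizeof("object-name") - 1;

	/* Both locker and flags are u_int32_t after this change. */
	if ((ret = lock_get(lt, locker, 0, &obj, DB_LOCK_WRITE, &lock)) == 0)
		ret = lock_put(lt, lock);

out:	(void)lock_close(lt);
	return (ret);
}
]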
diff --git a/db2/lock/lock.c b/db2/lock/lock.c
index 0846d3c..3d20e0d 100644
--- a/db2/lock/lock.c
+++ b/db2/lock/lock.c
@@ -1,28 +1,21 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 1997
+ * Copyright (c) 1996, 1997, 1998
  *	Sleepycat Software. All rights reserved.
  */
 
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock.c 10.43 (Sleepycat) 1/8/98";
+static const char sccsid[] = "@(#)lock.c 10.52 (Sleepycat) 5/10/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
 
 #include <errno.h>
-#include <fcntl.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 #endif
 
 #include "db_int.h"
@@ -34,248 +27,15 @@ static const char sccsid[] = "@(#)lock.c 10.43 (Sleepycat) 1/8/98";
 #include "db_am.h"
 
 static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int));
-static int __lock_count_locks __P((DB_LOCKREGION *));
-static int __lock_count_objs __P((DB_LOCKREGION *));
-static int __lock_create __P((const char *, int, DB_ENV *));
 static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *));
-static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, int, const DBT *,
-    db_lockmode_t, struct __db_lock **));
-static int __lock_grow_region __P((DB_LOCKTAB *, int, size_t));
+static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
+    const DBT *, db_lockmode_t, struct __db_lock **));
 static int __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int));
 static void __lock_remove_waiter
     __P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t));
-static void __lock_reset_region __P((DB_LOCKTAB *));
-static int __lock_validate_region __P((DB_LOCKTAB *));
-#ifdef DEBUG
-static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *));
-static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *));
-static void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
-#endif
-
-/*
- * Create and initialize a lock region in shared memory.
- */
-
-/*
- * __lock_create --
- *	Create the lock region. Returns an errno. In most cases,
- * the errno should be that returned by __db_ropen, in which case
- * an EAGAIN means that we should retry, and an EEXIST means that
- * the region exists and we didn't need to create it. Any other
- * sort of errno should be treated as a system error, leading to a
- * failure of the original interface call.
- */
-static int
-__lock_create(path, mode, dbenv)
-	const char *path;
-	int mode;
-	DB_ENV *dbenv;
-{
-	struct __db_lock *lp;
-	struct lock_header *tq_head;
-	struct obj_header *obj_head;
-	DB_LOCKOBJ *op;
-	DB_LOCKREGION *lrp;
-	u_int maxlocks;
-	u_int32_t i;
-	int fd, lock_modes, nelements, ret;
-	const u_int8_t *conflicts;
-	u_int8_t *curaddr;
-
-	maxlocks = dbenv == NULL || dbenv->lk_max == 0 ?
-	    DB_LOCK_DEFAULT_N : dbenv->lk_max;
-	lock_modes = dbenv == NULL || dbenv->lk_modes == 0 ?
-	    DB_LOCK_RW_N : dbenv->lk_modes;
-	conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ?
-	    db_rw_conflicts : dbenv->lk_conflicts;
-
-	if ((ret =
-	    __db_rcreate(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, mode,
-	    LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks)),
-	    0, &fd, &lrp)) != 0)
-		return (ret);
-
-	/* Region exists; now initialize it. */
-	lrp->table_size = __db_tablesize(maxlocks);
-	lrp->magic = DB_LOCKMAGIC;
-	lrp->version = DB_LOCKVERSION;
-	lrp->id = 0;
-	lrp->maxlocks = maxlocks;
-	lrp->need_dd = 0;
-	lrp->detect = DB_LOCK_NORUN;
-	lrp->numobjs = maxlocks;
-	lrp->nlockers = 0;
-	lrp->mem_bytes = ALIGN(STRING_SIZE(maxlocks), sizeof(size_t));
-	lrp->increment = lrp->hdr.size / 2;
-	lrp->nmodes = lock_modes;
-	lrp->nconflicts = 0;
-	lrp->nrequests = 0;
-	lrp->nreleases = 0;
-	lrp->ndeadlocks = 0;
-
-	/*
-	 * As we write the region, we've got to maintain the alignment
-	 * for the structures that follow each chunk. This information
-	 * ends up being encapsulated both in here as well as in the
-	 * lock.h file for the XXX_SIZE macros.
-	 */
-	/* Initialize conflict matrix. */
-	curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION);
-	memcpy(curaddr, conflicts, lock_modes * lock_modes);
-	curaddr += lock_modes * lock_modes;
-
-	/*
-	 * Initialize hash table.
-	 */
-	curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN);
-	lrp->hash_off = curaddr - (u_int8_t *)lrp;
-	nelements = lrp->table_size;
-	__db_hashinit(curaddr, nelements);
-	curaddr += nelements * sizeof(DB_HASHTAB);
-
-	/*
-	 * Initialize locks onto a free list. Since locks contains mutexes,
-	 * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT
-	 * boundary.
-	 */
-	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
-	tq_head = &lrp->free_locks;
-	SH_TAILQ_INIT(tq_head);
-
-	for (i = 0; i++ < maxlocks;
-	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
-		lp = (struct __db_lock *)curaddr;
-		lp->status = DB_LSTAT_FREE;
-		SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock);
-	}
-
-	/* Initialize objects onto a free list. */
-	obj_head = &lrp->free_objs;
-	SH_TAILQ_INIT(obj_head);
-
-	for (i = 0; i++ < maxlocks; curaddr += sizeof(DB_LOCKOBJ)) {
-		op = (DB_LOCKOBJ *)curaddr;
-		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
-	}
-
-	/*
-	 * Initialize the string space; as for all shared memory allocation
-	 * regions, this requires size_t alignment, since we store the
-	 * lengths of malloc'd areas in the area..
-	 */
-	curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t));
-	lrp->mem_off = curaddr - (u_int8_t *)lrp;
-	__db_shalloc_init(curaddr, lrp->mem_bytes);
-
-	/* Release the lock. */
-	(void)__db_mutex_unlock(&lrp->hdr.lock, fd);
-
-	/* Now unmap the region. */
-	if ((ret = __db_rclose(dbenv, fd, lrp)) != 0) {
-		(void)lock_unlink(path, 1 /* force */, dbenv);
-		return (ret);
-	}
-
-	return (0);
-}
 
 int
-lock_open(path, flags, mode, dbenv, ltp)
-	const char *path;
-	int flags, mode;
-	DB_ENV *dbenv;
-	DB_LOCKTAB **ltp;
-{
-	DB_LOCKTAB *lt;
-	int ret, retry_cnt;
-
-	/* Validate arguments. */
-#ifdef HAVE_SPINLOCKS
-#define OKFLAGS (DB_CREATE | DB_THREAD)
-#else
-#define OKFLAGS (DB_CREATE)
-#endif
-	if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0)
-		return (ret);
-
-	/*
-	 * Create the lock table structure.
-	 */
-	if ((lt = (DB_LOCKTAB *)__db_calloc(1, sizeof(DB_LOCKTAB))) == NULL) {
-		__db_err(dbenv, "%s", strerror(ENOMEM));
-		return (ENOMEM);
-	}
-	lt->dbenv = dbenv;
-
-	/*
-	 * Now, create the lock region if it doesn't already exist.
-	 */
-	retry_cnt = 0;
-retry:	if (LF_ISSET(DB_CREATE) &&
-	    (ret = __lock_create(path, mode, dbenv)) != 0)
-		if (ret == EAGAIN && ++retry_cnt < 3) {
-			(void)__db_sleep(1, 0);
-			goto retry;
-		} else if (ret == EEXIST) /* We did not create the region */
-			LF_CLR(DB_CREATE);
-		else
-			goto out;
-
-	/*
-	 * Finally, open the region, map it in, and increment the
-	 * reference count.
-	 */
-	retry_cnt = 0;
-retry1:	if ((ret = __db_ropen(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE,
-	    LF_ISSET(~(DB_CREATE | DB_THREAD)), &lt->fd, &lt->region)) != 0) {
-		if (ret == EAGAIN && ++retry_cnt < 3) {
-			(void)__db_sleep(1, 0);
-			goto retry1;
-		}
-		goto out;
-	}
-
-	if (lt->region->magic != DB_LOCKMAGIC) {
-		__db_err(dbenv, "lock_open: Bad magic number");
-		ret = EINVAL;
-		goto out;
-	}
-
-	/* Check for automatic deadlock detection. */
-	if (dbenv->lk_detect != DB_LOCK_NORUN) {
-		if (lt->region->detect != DB_LOCK_NORUN &&
-		    dbenv->lk_detect != DB_LOCK_DEFAULT &&
-		    lt->region->detect != dbenv->lk_detect) {
-			__db_err(dbenv,
-			    "lock_open: incompatible deadlock detector mode");
-			ret = EINVAL;
-			goto out;
-		}
-		if (lt->region->detect == DB_LOCK_NORUN)
-			lt->region->detect = dbenv->lk_detect;
-	}
-
-	/* Set up remaining pointers into region. */
-	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
-	lt->hashtab =
-	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
-	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
-	lt->reg_size = lt->region->hdr.size;
-
-	*ltp = lt;
-	return (0);
-
-/* Error handling. */
-out:	if (lt->region != NULL)
-		(void)__db_rclose(lt->dbenv, lt->fd, lt->region);
-	if (LF_ISSET(DB_CREATE))
-		(void)lock_unlink(path, 1, lt->dbenv);
-	__db_free(lt);
-	return (ret);
-}
-
-int
-lock_id (lt, idp)
+lock_id(lt, idp)
 	DB_LOCKTAB *lt;
 	u_int32_t *idp;
 {
@@ -294,8 +54,8 @@ lock_id (lt, idp)
 int
 lock_vec(lt, locker, flags, list, nlist, elistp)
 	DB_LOCKTAB *lt;
-	u_int32_t locker;
-	int flags, nlist;
+	u_int32_t locker, flags;
+	int nlist;
 	DB_LOCKREQ *list, **elistp;
 {
 	struct __db_lock *lp;
@@ -345,7 +105,7 @@ lock_vec(lt, locker, flags, list, nlist, elistp)
 			for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
 			    lp != NULL;
 			    lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
-				if ((ret = __lock_put_internal(lt, lp, 0)) != 0)
+				if ((ret = __lock_put_internal(lt, lp, 1)) != 0)
 					break;
 			}
 			__lock_freeobj(lt, sh_locker);
@@ -436,8 +196,7 @@ lock_vec(lt, locker, flags, list, nlist, elistp)
 int
 lock_get(lt, locker, flags, obj, lock_mode, lock)
 	DB_LOCKTAB *lt;
-	u_int32_t locker;
-	int flags;
+	u_int32_t locker, flags;
 	const DBT *obj;
 	db_lockmode_t lock_mode;
 	DB_LOCK *lock;
@@ -496,35 +255,6 @@ lock_put(lt, lock)
 	return (ret);
 }
 
-int
-lock_close(lt)
-	DB_LOCKTAB *lt;
-{
-	int ret;
-
-	if ((ret = __db_rclose(lt->dbenv, lt->fd, lt->region)) != 0)
-		return (ret);
-
-	/* Free lock table. */
-	__db_free(lt);
-	return (0);
-}
-
-int
-lock_unlink(path, force, dbenv)
-	const char *path;
-	int force;
-	DB_ENV *dbenv;
-{
-	return (__db_runlink(dbenv,
-	    DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, force));
-}
-
-/*
- * XXX This looks like it could be void, but I'm leaving it returning
- * an int because I think it will have to when we go through and add
- * the appropriate error checking for the EINTR on mutexes.
- */
 static int
 __lock_put_internal(lt, lockp, do_all)
 	DB_LOCKTAB *lt;
@@ -593,7 +323,7 @@ __lock_put_internal(lt, lockp, do_all)
 		SH_TAILQ_INSERT_TAIL(&sh_obj->holders, lp_w, links);
 
 		/* Wake up waiter. */
-		(void)__db_mutex_unlock(&lp_w->mutex, lt->fd);
+		(void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd);
 		state_changed = 1;
 	}
 
@@ -626,8 +356,7 @@ __lock_put_internal(lt, lockp, do_all)
 static int
 __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
 	DB_LOCKTAB *lt;
-	u_int32_t locker;
-	int flags;
+	u_int32_t locker, flags;
 	const DBT *obj;
 	db_lockmode_t lock_mode;
 	struct __db_lock **lockp;
@@ -741,7 +470,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
 	 */
 	(void)__db_mutex_init(&newl->mutex,
 	    MUTEX_LOCK_OFFSET(lt->region, &newl->mutex));
-	(void)__db_mutex_lock(&newl->mutex, lt->fd);
+	(void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
 
 	/*
	 * Now, insert the lock onto its locker's list.
@@ -772,7 +501,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
 	if (lrp->detect != DB_LOCK_NORUN)
 		ret = lock_detect(lt, 0, lrp->detect);
 
-	(void)__db_mutex_lock(&newl->mutex, lt->fd);
+	(void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd);
 	LOCK_LOCKREGION(lt);
 
 	if (newl->status != DB_LSTAT_PENDING) {
@@ -802,306 +531,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp)
 }
 
 /*
- * This is called at every interface to verify if the region
- * has changed size, and if so, to remap the region in and
- * reset the process pointers.
- */
-static int
-__lock_validate_region(lt)
-	DB_LOCKTAB *lt;
-{
-	int ret;
-
-	if (lt->reg_size == lt->region->hdr.size)
-		return (0);
-
-	/* Grow the region. */
-	if ((ret = __db_rremap(lt->dbenv, lt->region,
-	    lt->reg_size, lt->region->hdr.size, lt->fd, &lt->region)) != 0)
-		return (ret);
-
-	__lock_reset_region(lt);
-
-	return (0);
-}
-
-/*
- * We have run out of space; time to grow the region.
- */
-static int
-__lock_grow_region(lt, which, howmuch)
-	DB_LOCKTAB *lt;
-	int which;
-	size_t howmuch;
-{
-	struct __db_lock *newl;
-	struct lock_header *lock_head;
-	struct obj_header *obj_head;
-	DB_LOCKOBJ *op;
-	DB_LOCKREGION *lrp;
-	float lock_ratio, obj_ratio;
-	size_t incr, oldsize, used;
-	u_int32_t i, newlocks, newmem, newobjs;
-	int ret, usedlocks, usedmem, usedobjs;
-	u_int8_t *curaddr;
-
-	lrp = lt->region;
-	oldsize = lrp->hdr.size;
-	incr = lrp->increment;
-
-	/* Figure out how much of each sort of space we have. */
-	usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem);
-	usedobjs = lrp->numobjs - __lock_count_objs(lrp);
-	usedlocks = lrp->maxlocks - __lock_count_locks(lrp);
-
-	/*
-	 * Figure out what fraction of the used space belongs to each
-	 * different type of "thing" in the region. Then partition the
-	 * new space up according to this ratio.
-	 */
-	used = usedmem +
-	    usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) +
-	    usedobjs * sizeof(DB_LOCKOBJ);
-
-	lock_ratio = usedlocks *
-	    ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used;
-	obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used;
-
-	newlocks = (u_int32_t)(lock_ratio *
-	    incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
-	newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ));
-	newmem = incr -
-	    (newobjs * sizeof(DB_LOCKOBJ) +
-	    newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
-
-	/*
-	 * Make sure we allocate enough memory for the object being
-	 * requested.
-	 */
-	switch (which) {
-	case DB_LOCK_LOCK:
-		if (newlocks == 0) {
-			newlocks = 10;
-			incr += newlocks * sizeof(struct __db_lock);
-		}
-		break;
-	case DB_LOCK_OBJ:
-		if (newobjs == 0) {
-			newobjs = 10;
-			incr += newobjs * sizeof(DB_LOCKOBJ);
-		}
-		break;
-	case DB_LOCK_MEM:
-		if (newmem < howmuch * 2) {
-			incr += howmuch * 2 - newmem;
-			newmem = howmuch * 2;
-		}
-		break;
-	}
-
-	newmem += ALIGN(incr, sizeof(size_t)) - incr;
-	incr = ALIGN(incr, sizeof(size_t));
-
-	/*
-	 * Since we are going to be allocating locks at the beginning of the
-	 * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT
-	 * aligned. We did not guarantee this when we created the region, so
-	 * we may need to pad the old region by extra bytes to ensure this
-	 * alignment.
-	 */
-	incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize;
-
-	__db_err(lt->dbenv,
-	    "Growing lock region: %lu locks %lu objs %lu bytes",
-	    (u_long)newlocks, (u_long)newobjs, (u_long)newmem);
-
-	if ((ret = __db_rgrow(lt->dbenv, lt->fd, incr)) != 0)
-		return (ret);
-	if ((ret = __db_rremap(lt->dbenv,
-	    lt->region, oldsize, oldsize + incr, lt->fd, &lt->region)) != 0)
-		return (ret);
-	__lock_reset_region(lt);
-
-	/* Update region parameters. */
-	lrp = lt->region;
-	lrp->increment = incr << 1;
-	lrp->maxlocks += newlocks;
-	lrp->numobjs += newobjs;
-	lrp->mem_bytes += newmem;
-
-	curaddr = (u_int8_t *)lrp + oldsize;
-	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
-
-	/* Put new locks onto the free list. */
-	lock_head = &lrp->free_locks;
-	for (i = 0; i++ < newlocks;
-	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
-		newl = (struct __db_lock *)curaddr;
-		SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock);
-	}
-
-	/* Put new objects onto the free list. */
-	obj_head = &lrp->free_objs;
-	for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) {
-		op = (DB_LOCKOBJ *)curaddr;
-		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
-	}
-
-	*((size_t *)curaddr) = newmem - sizeof(size_t);
-	curaddr += sizeof(size_t);
-	__db_shalloc_free(lt->mem, curaddr);
-
-	return (0);
-}
-
-#ifdef DEBUG
-/*
- * __lock_dump_region --
- *
- * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, u_int));
- */
-void
-__lock_dump_region(lt, flags)
-	DB_LOCKTAB *lt;
-	u_int flags;
-{
-	struct __db_lock *lp;
-	DB_LOCKOBJ *op;
-	DB_LOCKREGION *lrp;
-	u_int32_t i, j;
-
-	lrp = lt->region;
-
-	printf("Lock region parameters\n");
-	printf("%s:0x%x\t%s:%lu\t%s:%lu\t%s:%lu\n%s:%lu\t%s:%lu\t%s:%lu\t\n",
-	    "magic ", lrp->magic,
-	    "version ", (u_long)lrp->version,
-	    "processes ", (u_long)lrp->hdr.refcnt,
-	    "maxlocks ", (u_long)lrp->maxlocks,
-	    "table size ", (u_long)lrp->table_size,
-	    "nmodes ", (u_long)lrp->nmodes,
-	    "numobjs ", (u_long)lrp->numobjs);
-	printf("%s:%lu\t%s:%lu\t%s:%lu\n%s:%lu\t%s:%lu\t%s:%lu\n",
-	    "size ", (u_long)lrp->hdr.size,
-	    "nlockers ", (u_long)lrp->nlockers,
-	    "hash_off ", (u_long)lrp->hash_off,
-	    "increment ", (u_long)lrp->increment,
-	    "mem_off ", (u_long)lrp->mem_off,
-	    "mem_bytes ", (u_long)lrp->mem_bytes);
-#ifndef HAVE_SPINLOCKS
-	printf("Mutex: off %lu", (u_long)lrp->hdr.lock.off);
-#endif
-	printf(" waits %lu nowaits %lu",
-	    (u_long)lrp->hdr.lock.mutex_set_wait,
-	    (u_long)lrp->hdr.lock.mutex_set_nowait);
-	printf("\n%s:%lu\t%s:%lu\t%s:%lu\t%s:%lu\n",
-	    "nconflicts ", (u_long)lrp->nconflicts,
-	    "nrequests ", (u_long)lrp->nrequests,
-	    "nreleases ", (u_long)lrp->nreleases,
-	    "ndeadlocks ", (u_long)lrp->ndeadlocks);
-	printf("need_dd %lu\n", (u_long)lrp->need_dd);
-	if (flags & LOCK_DEBUG_CONF) {
-		printf("\nConflict matrix\n");
-
-		for (i = 0; i < lrp->nmodes; i++) {
-			for (j = 0; j < lrp->nmodes; j++)
-				printf("%lu\t",
-				    (u_long)lt->conflicts[i * lrp->nmodes + j]);
-			printf("\n");
-		}
-	}
-
-	for (i = 0; i < lrp->table_size; i++) {
-		op = SH_TAILQ_FIRST(&lt->hashtab[i], __db_lockobj);
-		if (op != NULL && flags & LOCK_DEBUG_BUCKET)
-			printf("Bucket %lu:\n", (unsigned long)i);
-		while (op != NULL) {
-			if (op->type == DB_LOCK_LOCKER &&
-			    flags & LOCK_DEBUG_LOCKERS)
-				__lock_dump_locker(lt, op);
-			else if (flags & LOCK_DEBUG_OBJECTS &&
-			    op->type == DB_LOCK_OBJTYPE)
-				__lock_dump_object(lt, op);
-			op = SH_TAILQ_NEXT(op, links, __db_lockobj);
-		}
-	}
-
-	if (flags & LOCK_DEBUG_LOCK) {
-		printf("\nLock Free List\n");
-		for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
-		    lp != NULL;
-		    lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
-			printf("0x%x: %lu\t%lu\t%lu\t0x%x\n", (u_int)lp,
-			    (u_long)lp->holder, (u_long)lp->mode,
-			    (u_long)lp->status, (u_int)lp->obj);
-		}
-	}
-
-	if (flags & LOCK_DEBUG_LOCK) {
-		printf("\nObject Free List\n");
-		for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
-		    op != NULL;
-		    op = SH_TAILQ_NEXT(op, links, __db_lockobj))
-			printf("0x%x\n", (u_int)op);
-	}
-
-	if (flags & LOCK_DEBUG_MEM) {
-		printf("\nMemory Free List\n");
-		__db_shalloc_dump(stdout, lt->mem);
-	}
-}
-
-static void
-__lock_dump_locker(lt, op)
-	DB_LOCKTAB *lt;
-	DB_LOCKOBJ *op;
-{
-	struct __db_lock *lp;
-	u_int32_t locker;
-	void *ptr;
-
-	ptr = SH_DBT_PTR(&op->lockobj);
-	memcpy(&locker, ptr, sizeof(u_int32_t));
-	printf("L %lx", (u_long)locker);
-
-	lp = SH_LIST_FIRST(&op->heldby, __db_lock);
-	if (lp == NULL) {
-		printf("\n");
-		return;
-	}
-	for (; lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
-		__lock_printlock(lt, lp, 0);
-}
-
-static void
-__lock_dump_object(lt, op)
-	DB_LOCKTAB *lt;
-	DB_LOCKOBJ *op;
-{
-	struct __db_lock *lp;
-	u_int32_t j;
-	char *ptr;
-
-	ptr = SH_DBT_PTR(&op->lockobj);
-	for (j = 0; j < op->lockobj.size; ptr++, j++)
-		printf("%c", (int)*ptr);
-	printf("\n");
-
-	printf("H:");
-	for (lp =
-	    SH_TAILQ_FIRST(&op->holders, __db_lock);
-	    lp != NULL;
-	    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
-		__lock_printlock(lt, lp, 0);
-	lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
-	if (lp != NULL) {
-		printf("\nW:");
-		for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock))
-			__lock_printlock(lt, lp, 0);
-	}
-}
-
-/*
  * __lock_is_locked --
  *
  * PUBLIC: int __lock_is_locked
@@ -1136,7 +565,12 @@ __lock_is_locked(lt, locker, dbt, mode)
 	return (0);
 }
 
-static void
+/*
+ * __lock_printlock --
+ *
+ * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ */
+void
 __lock_printlock(lt, lp, ispgno)
 	DB_LOCKTAB *lt;
 	struct __db_lock *lp;
@@ -1213,39 +647,6 @@ __lock_printlock(lt, lp, ispgno)
 		printf("\n");
 	}
 }
-#endif
-
-static int
-__lock_count_locks(lrp)
-	DB_LOCKREGION *lrp;
-{
-	struct __db_lock *newl;
-	int count;
-
-	count = 0;
-	for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
-	    newl != NULL;
-	    newl = SH_TAILQ_NEXT(newl, links, __db_lock))
-		count++;
-
-	return (count);
-}
-
-static int
-__lock_count_objs(lrp)
-	DB_LOCKREGION *lrp;
-{
-	DB_LOCKOBJ *obj;
-	int count;
-
-	count = 0;
-	for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
-	    obj != NULL;
-	    obj = SH_TAILQ_NEXT(obj, links, __db_lockobj))
-		count++;
-
-	return (count);
-}
 
 /*
  * PUBLIC: int __lock_getobj __P((DB_LOCKTAB *,
@@ -1354,19 +755,7 @@ __lock_remove_waiter(lt, sh_obj, lockp, status)
 	lockp->status = status;
 
 	/* Wake whoever is waiting on this lock. */
-	(void)__db_mutex_unlock(&lockp->mutex, lt->fd);
-}
-
-static void
-__lock_freeobj(lt, obj)
-	DB_LOCKTAB *lt;
-	DB_LOCKOBJ *obj;
-{
-	HASHREMOVE_EL(lt->hashtab,
-	    __db_lockobj, links, obj, lt->region->table_size, __lock_lhash);
-	if (obj->lockobj.size > sizeof(obj->objdata))
-		__db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj));
-	SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj);
+	(void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd);
 }
 
 static void
@@ -1384,17 +773,18 @@ __lock_checklocker(lt, lockp, do_remove)
 	if (__lock_getobj(lt, lockp->holder,
 	    NULL, DB_LOCK_LOCKER, &sh_locker) == 0 &&
 	    SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL) {
 		__lock_freeobj(lt, sh_locker);
-		lt->region->nlockers--;
+		lt->region->nlockers--;
 	}
 }
 
 static void
-__lock_reset_region(lt)
+__lock_freeobj(lt, obj)
 	DB_LOCKTAB *lt;
+	DB_LOCKOBJ *obj;
 {
-	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
-	lt->hashtab =
-	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
-	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
-	lt->reg_size = lt->region->hdr.size;
+	HASHREMOVE_EL(lt->hashtab,
+	    __db_lockobj, links, obj, lt->region->table_size, __lock_lhash);
+	if (obj->lockobj.size > sizeof(obj->objdata))
+		__db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj));
+	SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj);
 }
diff --git a/db2/lock/lock_conflict.c b/db2/lock/lock_conflict.c
index ff0287f..870aa0d 100644
--- a/db2/lock/lock_conflict.c
+++ b/db2/lock/lock_conflict.c
@@ -1,14 +1,14 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 1997
+ * Copyright (c) 1996, 1997, 1998
  *	Sleepycat Software. All rights reserved.
  */
 
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_conflict.c 10.2 (Sleepycat) 6/21/97";
+static const char sccsid[] = "@(#)lock_conflict.c 10.3 (Sleepycat) 4/10/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
diff --git a/db2/lock/lock_deadlock.c b/db2/lock/lock_deadlock.c
index 93c438c..4de4929 100644
--- a/db2/lock/lock_deadlock.c
+++ b/db2/lock/lock_deadlock.c
@@ -1,25 +1,21 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 1997
+ * Copyright (c) 1996, 1997, 1998
  *	Sleepycat Software. All rights reserved.
  */
 
 #include "config.h"
 
 #ifndef lint
-static const char copyright[] =
-"@(#) Copyright (c) 1997\n\
-	Sleepycat Software Inc. All rights reserved.\n";
-static const char sccsid[] = "@(#)lock_deadlock.c 10.26 (Sleepycat) 11/25/97";
-#endif
+static const char sccsid[] = "@(#)lock_deadlock.c 10.32 (Sleepycat) 4/26/98";
+#endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
 
 #include <errno.h>
 #include <string.h>
-#include <stdlib.h>
 #endif
 
 #include "db_int.h"
@@ -59,14 +55,14 @@ static int  __dd_build
 static u_int32_t *__dd_find __P((u_int32_t *, locker_info *, u_int32_t));
 
-#ifdef DEBUG
+#ifdef DIAGNOSTIC
 static void __dd_debug
     __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t));
 #endif
 
 int
 lock_detect(lt, flags, atype)
 	DB_LOCKTAB *lt;
-	int flags, atype;
+	u_int32_t flags, atype;
 {
 	DB_ENV *dbenv;
 	locker_info *idmap;
@@ -96,7 +92,7 @@ lock_detect(lt, flags, atype)
 	if (nlockers == 0)
 		return (0);
 
-#ifdef DEBUG
+#ifdef DIAGNOSTIC
 	if (dbenv->db_verbose != 0)
 		__dd_debug(dbenv, idmap, bitmap, nlockers);
 #endif
@@ -202,7 +198,7 @@ __dd_build(dbenv, bmp, nlockers, idmap)
 	u_int8_t *pptr;
 	locker_info *id_array;
 	u_int32_t *bitmap, count, *entryp, i, id, nentries, *tmpmap;
-	int is_first, ret;
+	int is_first;
 
 	lt = dbenv->lk_info;
 
@@ -322,8 +318,8 @@ retry:	count = lt->region->nlockers;
 		    lp != NULL;
 		    is_first = 0,
 		    lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
-			if ((ret = __lock_getobj(lt, lp->holder,
-			    NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
+			if (__lock_getobj(lt, lp->holder,
+			    NULL, DB_LOCK_LOCKER, &lockerp) != 0) {
 				__db_err(dbenv,
 				    "warning unable to find object");
 				continue;
@@ -357,8 +353,8 @@ retry:	count = lt->region->nlockers;
 	for (id = 0; id < count; id++) {
 		if (!id_array[id].valid)
 			continue;
-		if ((ret = __lock_getobj(lt,
-		    id_array[id].id, NULL, DB_LOCK_LOCKER, &lockerp)) != 0) {
+		if (__lock_getobj(lt,
+		    id_array[id].id, NULL, DB_LOCK_LOCKER, &lockerp) != 0) {
 			__db_err(dbenv,
 			    "No locks for locker %lu", (u_long)id_array[id].id);
 			continue;
@@ -448,7 +444,7 @@ __dd_abort(dbenv, info)
 	SH_LIST_REMOVE(lockp, locker_links, __db_lock);
 	sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
 	SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
-	(void)__db_mutex_unlock(&lockp->mutex, lt->fd);
+	(void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd);
 
 	ret = 0;
 
@@ -456,7 +452,7 @@ out:	UNLOCK_LOCKREGION(lt);
 	return (ret);
 }
 
-#ifdef DEBUG
+#ifdef DIAGNOSTIC
 static void
 __dd_debug(dbenv, idmap, bitmap, nlockers)
 	DB_ENV *dbenv;
diff --git a/db2/lock/lock_region.c b/db2/lock/lock_region.c
new file mode 100644
index 0000000..b597560
--- /dev/null
+++ b/db2/lock/lock_region.c
@@ -0,0 +1,726 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996, 1997, 1998
+ *	Sleepycat Software. All rights reserved.
+ */
+
+#include "config.h"
+
+#ifndef lint
+static const char sccsid[] = "@(#)lock_region.c 10.15 (Sleepycat) 6/2/98";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "shqueue.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "common_ext.h"
+
+static u_int32_t __lock_count_locks __P((DB_LOCKREGION *));
+static u_int32_t __lock_count_objs __P((DB_LOCKREGION *));
+static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
+static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
+static const char *__lock_dump_status __P((db_status_t));
+static void __lock_reset_region __P((DB_LOCKTAB *));
+static int __lock_tabinit __P((DB_ENV *, DB_LOCKREGION *));
+
+int
+lock_open(path, flags, mode, dbenv, ltp)
+	const char *path;
+	u_int32_t flags;
+	int mode;
+	DB_ENV *dbenv;
+	DB_LOCKTAB **ltp;
+{
+	DB_LOCKTAB *lt;
+	u_int32_t lock_modes, maxlocks, regflags;
+	int ret;
+
+	/* Validate arguments. */
+#ifdef HAVE_SPINLOCKS
+#define OKFLAGS (DB_CREATE | DB_THREAD)
+#else
+#define OKFLAGS (DB_CREATE)
+#endif
+	if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0)
+		return (ret);
+
+	/* Create the lock table structure. */
+	if ((lt = (DB_LOCKTAB *)__db_calloc(1, sizeof(DB_LOCKTAB))) == NULL) {
+		__db_err(dbenv, "%s", strerror(ENOMEM));
+		return (ENOMEM);
+	}
+	lt->dbenv = dbenv;
+
+	/* Grab the values that we need to compute the region size. */
+	lock_modes = DB_LOCK_RW_N;
+	maxlocks = DB_LOCK_DEFAULT_N;
+	regflags = REGION_SIZEDEF;
+	if (dbenv != NULL) {
+		if (dbenv->lk_modes != 0) {
+			lock_modes = dbenv->lk_modes;
+			regflags = 0;
+		}
+		if (dbenv->lk_max != 0) {
+			maxlocks = dbenv->lk_max;
+			regflags = 0;
+		}
+	}
+
+	/* Join/create the lock region. */
+	lt->reginfo.dbenv = dbenv;
+	lt->reginfo.appname = DB_APP_NONE;
+	if (path == NULL)
+		lt->reginfo.path = NULL;
+	else
+		if ((lt->reginfo.path = (char *)__db_strdup(path)) == NULL)
+			goto err;
+	lt->reginfo.file = DB_DEFAULT_LOCK_FILE;
+	lt->reginfo.mode = mode;
+	lt->reginfo.size =
+	    LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks));
+	lt->reginfo.dbflags = flags;
+	lt->reginfo.addr = NULL;
+	lt->reginfo.fd = -1;
+	lt->reginfo.flags = regflags;
+
+	if ((ret = __db_rattach(&lt->reginfo)) != 0)
+		goto err;
+
+	/* Now set up the pointer to the region. */
+	lt->region = lt->reginfo.addr;
+
+	/* Initialize the region if we created it. */
+	if (F_ISSET(&lt->reginfo, REGION_CREATED)) {
+		lt->region->maxlocks = maxlocks;
+		lt->region->nmodes = lock_modes;
+		if ((ret = __lock_tabinit(dbenv, lt->region)) != 0)
+			goto err;
+	} else {
+		/* Check for an unexpected region. */
+		if (lt->region->magic != DB_LOCKMAGIC) {
+			__db_err(dbenv,
+			    "lock_open: %s: bad magic number", path);
+			ret = EINVAL;
+			goto err;
+		}
+	}
+
+	/* Check for automatic deadlock detection. */
+	if (dbenv != NULL && dbenv->lk_detect != DB_LOCK_NORUN) {
+		if (lt->region->detect != DB_LOCK_NORUN &&
+		    dbenv->lk_detect != DB_LOCK_DEFAULT &&
+		    lt->region->detect != dbenv->lk_detect) {
+			__db_err(dbenv,
+			    "lock_open: incompatible deadlock detector mode");
+			ret = EINVAL;
+			goto err;
+		}
+		if (lt->region->detect == DB_LOCK_NORUN)
+			lt->region->detect = dbenv->lk_detect;
+	}
+
+	/* Set up remaining pointers into region. */
+	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
+	lt->hashtab =
+	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
+	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
+
+	UNLOCK_LOCKREGION(lt);
+	*ltp = lt;
+	return (0);
+
+err:	if (lt->reginfo.addr != NULL) {
+		UNLOCK_LOCKREGION(lt);
+		(void)__db_rdetach(&lt->reginfo);
+		if (F_ISSET(&lt->reginfo, REGION_CREATED))
+			(void)lock_unlink(path, 1, dbenv);
+	}
+
+	if (lt->reginfo.path != NULL)
+		FREES(lt->reginfo.path);
+	FREE(lt, sizeof(*lt));
+	return (ret);
+}
+
+/*
+ * __lock_tabinit --
+ *	Initialize the lock region.
+ */
+static int
+__lock_tabinit(dbenv, lrp)
+	DB_ENV *dbenv;
+	DB_LOCKREGION *lrp;
+{
+	struct __db_lock *lp;
+	struct lock_header *tq_head;
+	struct obj_header *obj_head;
+	DB_LOCKOBJ *op;
+	u_int32_t i, nelements;
+	const u_int8_t *conflicts;
+	u_int8_t *curaddr;
+
+	conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ?
+	    db_rw_conflicts : dbenv->lk_conflicts;
+
+	lrp->table_size = __db_tablesize(lrp->maxlocks);
+	lrp->magic = DB_LOCKMAGIC;
+	lrp->version = DB_LOCKVERSION;
+	lrp->id = 0;
+	/*
+	 * These fields (lrp->maxlocks, lrp->nmodes) are initialized
+	 * in the caller, since we had to grab those values to size
+	 * the region.
+	 */
+	lrp->need_dd = 0;
+	lrp->detect = DB_LOCK_NORUN;
+	lrp->numobjs = lrp->maxlocks;
+	lrp->nlockers = 0;
+	lrp->mem_bytes = ALIGN(STRING_SIZE(lrp->maxlocks), sizeof(size_t));
+	lrp->increment = lrp->hdr.size / 2;
+	lrp->nconflicts = 0;
+	lrp->nrequests = 0;
+	lrp->nreleases = 0;
+	lrp->ndeadlocks = 0;
+
+	/*
+	 * As we write the region, we've got to maintain the alignment
+	 * for the structures that follow each chunk. This information
+	 * ends up being encapsulated both in here as well as in the
+	 * lock.h file for the XXX_SIZE macros.
+	 */
+	/* Initialize conflict matrix. */
+	curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION);
+	memcpy(curaddr, conflicts, lrp->nmodes * lrp->nmodes);
+	curaddr += lrp->nmodes * lrp->nmodes;
+
+	/*
+	 * Initialize hash table.
+	 */
+	curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN);
+	lrp->hash_off = curaddr - (u_int8_t *)lrp;
+	nelements = lrp->table_size;
+	__db_hashinit(curaddr, nelements);
+	curaddr += nelements * sizeof(DB_HASHTAB);
+
+	/*
+	 * Initialize locks onto a free list. Since locks contains mutexes,
+	 * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT
+	 * boundary.
+	 */
+	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
+	tq_head = &lrp->free_locks;
+	SH_TAILQ_INIT(tq_head);
+
+	for (i = 0; i++ < lrp->maxlocks;
+	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
+		lp = (struct __db_lock *)curaddr;
+		lp->status = DB_LSTAT_FREE;
+		SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock);
+	}
+
+	/* Initialize objects onto a free list. */
+	obj_head = &lrp->free_objs;
+	SH_TAILQ_INIT(obj_head);
+
+	for (i = 0; i++ < lrp->maxlocks; curaddr += sizeof(DB_LOCKOBJ)) {
+		op = (DB_LOCKOBJ *)curaddr;
+		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
+	}
+
+	/*
+	 * Initialize the string space; as for all shared memory allocation
+	 * regions, this requires size_t alignment, since we store the
+	 * lengths of malloc'd areas in the area.
+	 */
+	curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t));
+	lrp->mem_off = curaddr - (u_int8_t *)lrp;
+	__db_shalloc_init(curaddr, lrp->mem_bytes);
+	return (0);
+}
+
+int
+lock_close(lt)
+	DB_LOCKTAB *lt;
+{
+	int ret;
+
+	if ((ret = __db_rdetach(&lt->reginfo)) != 0)
+		return (ret);
+
+	if (lt->reginfo.path != NULL)
+		FREES(lt->reginfo.path);
+	FREE(lt, sizeof(*lt));
+
+	return (0);
+}
+
+int
+lock_unlink(path, force, dbenv)
+	const char *path;
+	int force;
+	DB_ENV *dbenv;
+{
+	REGINFO reginfo;
+	int ret;
+
+	memset(&reginfo, 0, sizeof(reginfo));
+	reginfo.dbenv = dbenv;
+	reginfo.appname = DB_APP_NONE;
+	if (path != NULL && (reginfo.path = (char *)__db_strdup(path)) == NULL)
+		return (ENOMEM);
+	reginfo.file = DB_DEFAULT_LOCK_FILE;
+	ret = __db_runlink(&reginfo, force);
+	if (reginfo.path != NULL)
+		FREES(reginfo.path);
+	return (ret);
+}
+
+/*
+ * __lock_validate_region --
+ *	Called at every interface to verify if the region has changed size,
+ *	and if so, to remap the region in and reset the process' pointers.
+ *
+ * PUBLIC: int __lock_validate_region __P((DB_LOCKTAB *));
+ */
+int
+__lock_validate_region(lt)
+	DB_LOCKTAB *lt;
+{
+	int ret;
+
+	if (lt->reginfo.size == lt->region->hdr.size)
+		return (0);
+
+	/* Detach/reattach the region. */
+	if ((ret = __db_rreattach(&lt->reginfo, lt->region->hdr.size)) != 0)
+		return (ret);
+
+	/* Reset region information. */
+	lt->region = lt->reginfo.addr;
+	__lock_reset_region(lt);
+
+	return (0);
+}
+
+/*
+ * __lock_grow_region --
+ *	We have run out of space; time to grow the region.
+ *
+ * PUBLIC: int __lock_grow_region __P((DB_LOCKTAB *, int, size_t));
+ */
+int
+__lock_grow_region(lt, which, howmuch)
+	DB_LOCKTAB *lt;
+	int which;
+	size_t howmuch;
+{
+	struct __db_lock *newl;
+	struct lock_header *lock_head;
+	struct obj_header *obj_head;
+	DB_LOCKOBJ *op;
+	DB_LOCKREGION *lrp;
+	float lock_ratio, obj_ratio;
+	size_t incr, oldsize, used, usedmem;
+	u_int32_t i, newlocks, newmem, newobjs, usedlocks, usedobjs;
+	u_int8_t *curaddr;
+	int ret;
+
+	lrp = lt->region;
+	oldsize = lrp->hdr.size;
+	incr = lrp->increment;
+
+	/* Figure out how much of each sort of space we have. */
+	usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem);
+	usedobjs = lrp->numobjs - __lock_count_objs(lrp);
+	usedlocks = lrp->maxlocks - __lock_count_locks(lrp);
+
+	/*
+	 * Figure out what fraction of the used space belongs to each
+	 * different type of "thing" in the region. Then partition the
+	 * new space up according to this ratio.
+	 */
+	used = usedmem +
+	    usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) +
+	    usedobjs * sizeof(DB_LOCKOBJ);
+
+	lock_ratio = usedlocks *
+	    ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used;
+	obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used;
+
+	newlocks = (u_int32_t)(lock_ratio *
+	    incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
+	newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ));
+	newmem = incr -
+	    (newobjs * sizeof(DB_LOCKOBJ) +
+	    newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
+
+	/*
+	 * Make sure we allocate enough memory for the object being
+	 * requested.
+	 */
+	switch (which) {
+	case DB_LOCK_LOCK:
+		if (newlocks == 0) {
+			newlocks = 10;
+			incr += newlocks * sizeof(struct __db_lock);
+		}
+		break;
+	case DB_LOCK_OBJ:
+		if (newobjs == 0) {
+			newobjs = 10;
+			incr += newobjs * sizeof(DB_LOCKOBJ);
+		}
+		break;
+	case DB_LOCK_MEM:
+		if (newmem < howmuch * 2) {
+			incr += howmuch * 2 - newmem;
+			newmem = howmuch * 2;
+		}
+		break;
+	}
+
+	newmem += ALIGN(incr, sizeof(size_t)) - incr;
+	incr = ALIGN(incr, sizeof(size_t));
+
+	/*
+	 * Since we are going to be allocating locks at the beginning of the
+	 * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT
+	 * aligned. We did not guarantee this when we created the region, so
+	 * we may need to pad the old region by extra bytes to ensure this
+	 * alignment.
+	 */
+	incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize;
+
+	__db_err(lt->dbenv,
+	    "Growing lock region: %lu locks %lu objs %lu bytes",
+	    (u_long)newlocks, (u_long)newobjs, (u_long)newmem);
+
+	if ((ret = __db_rgrow(&lt->reginfo, oldsize + incr)) != 0)
+		return (ret);
+	lt->region = lt->reginfo.addr;
+	__lock_reset_region(lt);
+
+	/* Update region parameters. */
+	lrp = lt->region;
+	lrp->increment = incr << 1;
+	lrp->maxlocks += newlocks;
+	lrp->numobjs += newobjs;
+	lrp->mem_bytes += newmem;
+
+	curaddr = (u_int8_t *)lrp + oldsize;
+	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
+
+	/* Put new locks onto the free list. */
+	lock_head = &lrp->free_locks;
+	for (i = 0; i++ < newlocks;
+	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
+		newl = (struct __db_lock *)curaddr;
+		SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock);
+	}
+
+	/* Put new objects onto the free list. */
+	obj_head = &lrp->free_objs;
+	for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) {
+		op = (DB_LOCKOBJ *)curaddr;
+		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
+	}
+
+	*((size_t *)curaddr) = newmem - sizeof(size_t);
+	curaddr += sizeof(size_t);
+	__db_shalloc_free(lt->mem, curaddr);
+
+	return (0);
+}
+
+static void
+__lock_reset_region(lt)
+	DB_LOCKTAB *lt;
+{
+	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
+	lt->hashtab =
+	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
+	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
+}
+
+/*
+ * lock_stat --
+ *	Return LOCK statistics.
+ */
+int
+lock_stat(lt, gspp, db_malloc)
+	DB_LOCKTAB *lt;
+	DB_LOCK_STAT **gspp;
+	void *(*db_malloc) __P((size_t));
+{
+	DB_LOCKREGION *rp;
+
+	*gspp = NULL;
+
+	if ((*gspp = db_malloc == NULL ?
+	    (DB_LOCK_STAT *)__db_malloc(sizeof(**gspp)) :
+	    (DB_LOCK_STAT *)db_malloc(sizeof(**gspp))) == NULL)
+		return (ENOMEM);
+
+	/* Copy out the global statistics. */
+	LOCK_LOCKREGION(lt);
+
+	rp = lt->region;
+	(*gspp)->st_magic = rp->magic;
+	(*gspp)->st_version = rp->version;
+	(*gspp)->st_maxlocks = rp->maxlocks;
+	(*gspp)->st_nmodes = rp->nmodes;
+	(*gspp)->st_numobjs = rp->numobjs;
+	(*gspp)->st_nlockers = rp->nlockers;
+	(*gspp)->st_nconflicts = rp->nconflicts;
+	(*gspp)->st_nrequests = rp->nrequests;
+	(*gspp)->st_nreleases = rp->nreleases;
+	(*gspp)->st_ndeadlocks = rp->ndeadlocks;
+	(*gspp)->st_region_nowait = rp->hdr.lock.mutex_set_nowait;
+	(*gspp)->st_region_wait = rp->hdr.lock.mutex_set_wait;
+	(*gspp)->st_refcnt = rp->hdr.refcnt;
+	(*gspp)->st_regsize = rp->hdr.size;
+
+	UNLOCK_LOCKREGION(lt);
+
+	return (0);
+}
+
+static u_int32_t
+__lock_count_locks(lrp)
+	DB_LOCKREGION *lrp;
+{
+	struct __db_lock *newl;
+	u_int32_t count;
+
+	count = 0;
+	for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
+	    newl != NULL;
+	    newl = SH_TAILQ_NEXT(newl, links, __db_lock))
+		count++;
+
+	return (count);
+}
+
+static u_int32_t
+__lock_count_objs(lrp)
+	DB_LOCKREGION *lrp;
+{
+	DB_LOCKOBJ *obj;
+	u_int32_t count;
+
+	count = 0;
+	for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
+	    obj != NULL;
+	    obj = SH_TAILQ_NEXT(obj, links, __db_lockobj))
+		count++;
+
+	return (count);
+}
+
+#define LOCK_DUMP_CONF		0x001	/* Conflict matrix. */
+#define LOCK_DUMP_FREE		0x002	/* Display lock free list. */
+#define LOCK_DUMP_LOCKERS	0x004	/* Display lockers. */
+#define LOCK_DUMP_MEM		0x008	/* Display region memory. */
+#define LOCK_DUMP_OBJECTS	0x010	/* Display objects. */
+#define LOCK_DUMP_ALL		0x01f	/* Display all. */
+
+/*
+ * __lock_dump_region --
+ *
+ * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, char *, FILE *));
+ */
+void
+__lock_dump_region(lt, area, fp)
+	DB_LOCKTAB *lt;
+	char *area;
+	FILE *fp;
+{
+	struct __db_lock *lp;
+	DB_LOCKOBJ *op;
+	DB_LOCKREGION *lrp;
+	u_int32_t flags, i, j;
+	int label;
+
+	/* Make it easy to call from the debugger. */
+	if (fp == NULL)
+		fp = stderr;
+
+	for (flags = 0; *area != '\0'; ++area)
+		switch (*area) {
+		case 'A':
+			LF_SET(LOCK_DUMP_ALL);
+			break;
+		case 'c':
+			LF_SET(LOCK_DUMP_CONF);
+			break;
+		case 'f':
+			LF_SET(LOCK_DUMP_FREE);
+			break;
+		case 'l':
+			LF_SET(LOCK_DUMP_LOCKERS);
+			break;
+		case 'm':
+			LF_SET(LOCK_DUMP_MEM);
+			break;
+		case 'o':
+			LF_SET(LOCK_DUMP_OBJECTS);
+			break;
+		}
+
+	lrp = lt->region;
+
+	fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
+	fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu\n%s: %lu, %s: %lu\n",
+	    "table size", (u_long)lrp->table_size,
+	    "hash_off", (u_long)lrp->hash_off,
+	    "increment", (u_long)lrp->increment,
+	    "mem_off", (u_long)lrp->mem_off,
+	    "mem_bytes", (u_long)lrp->mem_bytes,
+	    "need_dd", (u_long)lrp->need_dd);
+
+	if (LF_ISSET(LOCK_DUMP_CONF)) {
+		fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
+		for (i = 0; i < lrp->nmodes; i++) {
+			for (j = 0; j < lrp->nmodes; j++)
+				fprintf(fp, "%lu\t",
+				    (u_long)lt->conflicts[i * lrp->nmodes + j]);
+			fprintf(fp, "\n");
+		}
+	}
+
+	if (LF_ISSET(LOCK_DUMP_LOCKERS | LOCK_DUMP_OBJECTS)) {
+		fprintf(fp, "%s\nLock hash buckets\n", DB_LINE);
+		for (i = 0; i < lrp->table_size; i++) {
+			label = 1;
+			for (op = SH_TAILQ_FIRST(&lt->hashtab[i], __db_lockobj);
+			    op != NULL;
+			    op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
+				if (LF_ISSET(LOCK_DUMP_LOCKERS) &&
+				    op->type == DB_LOCK_LOCKER) {
+					if (label) {
+						fprintf(fp,
+						    "Bucket %lu:\n", (u_long)i);
+						label = 0;
+					}
+					__lock_dump_locker(lt, op, fp);
+				}
+				if (LF_ISSET(LOCK_DUMP_OBJECTS) &&
+				    op->type == DB_LOCK_OBJTYPE) {
+					if (label) {
+						fprintf(fp,
+						    "Bucket %lu:\n", (u_long)i);
+						label = 0;
+					}
+					__lock_dump_object(lt, op, fp);
+				}
+			}
+		}
+	}
+
+	if (LF_ISSET(LOCK_DUMP_FREE)) {
+		fprintf(fp, "%s\nLock free list\n", DB_LINE);
+		for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
+		    lp != NULL;
+		    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+			fprintf(fp, "0x%x: %lu\t%lu\t%s\t0x%x\n", (u_int)lp,
+			    (u_long)lp->holder, (u_long)lp->mode,
+			    __lock_dump_status(lp->status), (u_int)lp->obj);
+
+		fprintf(fp, "%s\nObject free list\n", DB_LINE);
+		for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
+		    op != NULL;
+		    op = SH_TAILQ_NEXT(op, links, __db_lockobj))
+			fprintf(fp, "0x%x\n", (u_int)op);
+	}
+
+	if (LF_ISSET(LOCK_DUMP_MEM))
+		__db_shalloc_dump(lt->mem, fp);
+}
+
+static void
+__lock_dump_locker(lt, op, fp)
+	DB_LOCKTAB *lt;
+	DB_LOCKOBJ *op;
+	FILE *fp;
+{
+	struct __db_lock *lp;
+	u_int32_t locker;
+	void *ptr;
+
+	ptr = SH_DBT_PTR(&op->lockobj);
+	memcpy(&locker, ptr, sizeof(u_int32_t));
+	fprintf(fp, "L %lx", (u_long)locker);
+
+	lp = SH_LIST_FIRST(&op->heldby, __db_lock);
+	if (lp == NULL) {
+		fprintf(fp, "\n");
+		return;
+	}
+	for (; lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
+		__lock_printlock(lt, lp, 0);
+}
+
+static void
+__lock_dump_object(lt, op, fp)
+	DB_LOCKTAB *lt;
+	DB_LOCKOBJ *op;
+	FILE *fp;
+{
+	struct __db_lock *lp;
+	u_int32_t j;
+	u_int8_t *ptr;
+	u_int ch;
+
+	ptr = SH_DBT_PTR(&op->lockobj);
+	for (j = 0; j < op->lockobj.size; ptr++, j++) {
+		ch = *ptr;
+		fprintf(fp, isprint(ch) ? "%c" : "\\%o", ch);
+	}
+	fprintf(fp, "\n");
+
+	fprintf(fp, "H:");
+	for (lp =
+	    SH_TAILQ_FIRST(&op->holders, __db_lock);
+	    lp != NULL;
+	    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+		__lock_printlock(lt, lp, 0);
+	lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+	if (lp != NULL) {
+		fprintf(fp, "\nW:");
+		for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+			__lock_printlock(lt, lp, 0);
+	}
+}
+
+static const char *
+__lock_dump_status(status)
+	db_status_t status;
+{
+	switch (status) {
+	case DB_LSTAT_ABORTED:
+		return ("aborted");
+	case DB_LSTAT_ERR:
+		return ("err");
+	case DB_LSTAT_FREE:
+		return ("free");
+	case DB_LSTAT_HELD:
+		return ("held");
+	case DB_LSTAT_NOGRANT:
+		return ("nogrant");
+	case DB_LSTAT_PENDING:
+		return ("pending");
+	case DB_LSTAT_WAITING:
+		return ("waiting");
+	}
+	return ("unknown status");
+}
diff --git a/db2/lock/lock_util.c b/db2/lock/lock_util.c
index 6c1e30f..7274a50 100644
--- a/db2/lock/lock_util.c
+++ b/db2/lock/lock_util.c
@@ -1,25 +1,20 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 1997
+ * Copyright (c) 1996, 1997, 1998
  *	Sleepycat Software. All rights reserved.
  */
 
 #include "config.h"
 
 #ifndef lint
-static const char sccsid[] = "@(#)lock_util.c 10.5 (Sleepycat) 1/8/98";
+static const char sccsid[] = "@(#)lock_util.c 10.9 (Sleepycat) 4/26/98";
 #endif /* not lint */
 
 #ifndef NO_SYSTEM_INCLUDES
 #include <sys/types.h>
 
-#include <fcntl.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 #endif
 
 #include "db_int.h"
@@ -30,11 +25,13 @@ static const char sccsid[] = "@(#)lock_util.c 10.5 (Sleepycat) 1/8/98";
 #include "lock.h"
 
 /*
- * This function is used to compare a DBT that is about to be entered
- * into a hash table with an object already in the hash table. Note
- * that it just returns true on equal and 0 on not-equal. Therefore this
- * cannot be used as a sort function; its purpose is to be used as a
- * hash comparison function.
+ * __lock_cmp --
+ *	This function is used to compare a DBT that is about to be entered
+ *	into a hash table with an object already in the hash table. Note
+ *	that it just returns true on equal and 0 on not-equal. Therefore
+ *	this function cannot be used as a sort function; its purpose is to
+ *	be used as a hash comparison function.
+ *
  * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
  */
 int
@@ -46,6 +43,7 @@ __lock_cmp(dbt, lock_obj)
 
 	if (lock_obj->type != DB_LOCK_OBJTYPE)
 		return (0);
+
 	obj_data = SH_DBT_PTR(&lock_obj->lockobj);
 	return (dbt->size == lock_obj->lockobj.size &&
 	    memcmp(dbt->data, obj_data, dbt->size) == 0);
@@ -69,35 +67,86 @@ __lock_locker_cmp(locker, lock_obj)
 }
 
 /*
- * PUBLIC: int __lock_ohash __P((const DBT *));
+ * The next two functions are the hash functions used to store objects in the
+ * lock hash table. They are hashing the same items, but one (__lock_ohash)
+ * takes a DBT (used for hashing a parameter passed from the user) and the
+ * other (__lock_lhash) takes a DB_LOCKOBJ (used for hashing something that is
+ * already in the lock manager). In both cases, we have a special check to
+ * fast path the case where we think we are doing a hash on a DB page/fileid
+ * pair. If the size is right, then we do the fast hash.
+ *
+ * We know that DB uses struct __db_ilocks for its lock objects. The first
+ * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes
+ * are a unique file id, where the first 4 bytes on UNIX systems are the file
+ * inode number, and the first 4 bytes on Windows systems are the FileIndexLow
+ * bytes. So, we use the XOR of the page number and the first four bytes of
+ * the file id to produce a 32-bit hash value.
+ *
+ * We have no particular reason to believe that this algorithm will produce
+ * a good hash, but we want a fast hash more than we want a good one, when
+ * we're coming through this code path.
  */
-int
-__lock_ohash(dbt)
-	const DBT *dbt;
-{
-	return (__ham_func5(dbt->data, dbt->size));
+#define FAST_HASH(P) {			\
+	u_int32_t __h;			\
+	u_int8_t *__cp, *__hp;		\
+	__hp = (u_int8_t *)&__h;	\
+	__cp = (u_int8_t *)(P);		\
+	__hp[0] = __cp[0] ^ __cp[4];	\
+	__hp[1] = __cp[1] ^ __cp[5];	\
+	__hp[2] = __cp[2] ^ __cp[6];	\
+	__hp[3] = __cp[3] ^ __cp[7];	\
+	return (__h);			\
 }
 
 /*
- * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
+ * __lock_ohash --
+ *
+ * PUBLIC: u_int32_t __lock_ohash __P((const DBT *));
  */
 u_int32_t
-__lock_locker_hash(locker)
-	u_int32_t locker;
+__lock_ohash(dbt)
+	const DBT *dbt;
 {
-	return (__ham_func5(&locker, sizeof(locker)));
+	if (dbt->size == sizeof(struct __db_ilock))
+		FAST_HASH(dbt->data);
+
+	return (__ham_func5(dbt->data, dbt->size));
 }
 
 /*
+ * __lock_lhash --
+ *
 * PUBLIC: u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
 */
 u_int32_t
 __lock_lhash(lock_obj)
 	DB_LOCKOBJ *lock_obj;
 {
+	u_int32_t tmp;
 	void *obj_data;
 
 	obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+	if (lock_obj->type == DB_LOCK_LOCKER) {
+		memcpy(&tmp, obj_data, sizeof(u_int32_t));
+		return (tmp);
+	}
+
+	if (lock_obj->lockobj.size == sizeof(struct __db_ilock))
+		FAST_HASH(obj_data);
+
	return (__ham_func5(obj_data, lock_obj->lockobj.size));
 }
+
+/*
+ * __lock_locker_hash --
+ *	Hash function for entering lockers into the hash table. Since these
+ *	are simply 32-bit unsigned integers, just return the locker value.
+ *
+ * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
+ */
+u_int32_t
+__lock_locker_hash(locker)
+	u_int32_t locker;
+{
+	return (locker);
+}
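[Closing illustration, not from the patch: the FAST_HASH macro above XORs the 4-byte page number (bytes 0-3 of the lock object) against the first four bytes of the file id (bytes 4-7). The same computation as a standalone function, assuming the key is at least eight bytes long; fast_hash is an invented name:

#include <stdint.h>

/* XOR bytes 0-3 (page number) with bytes 4-7 (start of file id). */
static uint32_t
fast_hash(const void *key)
{
	const unsigned char *cp = key;
	uint32_t h;
	unsigned char *hp = (unsigned char *)&h;

	hp[0] = cp[0] ^ cp[4];
	hp[1] = cp[1] ^ cp[5];
	hp[2] = cp[2] ^ cp[6];
	hp[3] = cp[3] ^ cp[7];
	return h;
}
]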