author     Jon Beniston <jon@beniston.com>          2018-09-06 13:53:15 +0100
committer  Corinna Vinschen <corinna@vinschen.de>   2018-09-06 17:29:49 +0200
commit     bd993df0e6aee5080a791b54cb0074ad76bab681
tree       bad92084e5f5375b5d99af387e2806b3e41542cd /newlib/libc/search
parent     77f8a6dfab98d8afd662c34ea832579f0565b40c
search: Fix Berkeley DB hash code for 16-bit targets.
hash.h: Use a 32-bit type for data stored on disk, so the code works on both
16- and 64-bit targets. Reduce the maximum bucket size on 16-bit targets so
that it fits in the available memory.
hash.c: Check that the bucket size isn't too big for the target's int.
hash_buf.c: Fix an overflow warning on 16-bit targets.
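
The portability point behind the hash.h change: a plain int is 16 bits on
small targets, so both the size and the field offsets of the disk-resident
header vary with the target, and a database written on one machine cannot be
read back on another. A minimal illustrative sketch (not part of the patch;
the struct names are hypothetical stand-ins for HASHHDR):

/* Sketch only: with plain int the on-disk layout depends on the target's
 * int width; with int32_t it is the same everywhere. */
#include <stdint.h>
#include <stdio.h>

struct hdr_plain_int {      /* size/offsets differ between 16- and 32-bit int */
    int magic;
    int version;
    int bsize;
};

struct hdr_fixed_width {    /* identical layout on every target */
    int32_t magic;
    int32_t version;
    int32_t bsize;
};

int main(void)
{
    printf("int-based header:     %zu bytes\n", sizeof(struct hdr_plain_int));
    printf("int32_t-based header: %zu bytes\n", sizeof(struct hdr_fixed_width));
    return 0;
}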
Diffstat (limited to 'newlib/libc/search')
-rw-r--r--  newlib/libc/search/hash.c       5
-rw-r--r--  newlib/libc/search/hash.h      45
-rw-r--r--  newlib/libc/search/hash_buf.c   2
3 files changed, 32 insertions, 20 deletions
diff --git a/newlib/libc/search/hash.c b/newlib/libc/search/hash.c
index af2be9a..e409618 100644
--- a/newlib/libc/search/hash.c
+++ b/newlib/libc/search/hash.c
@@ -193,6 +193,9 @@ __hash_open (const char *file,
RETURN_ERROR(EFTYPE, error1);
if (hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
RETURN_ERROR(EFTYPE, error1);
+ /* Check bucket size isn't too big for target int. */
+ if (hashp->BSIZE > INT_MAX)
+ RETURN_ERROR(EFTYPE, error1);
/*
* Figure out how many segments we need. Max_Bucket is the
* maximum bucket number, so the number of buckets is
@@ -343,7 +346,7 @@ init_hash(hashp, file, info)
if (stat(file, &statbuf))
#endif
return (NULL);
- hashp->BSIZE = statbuf.st_blksize;
+ hashp->BSIZE = MIN(statbuf.st_blksize, MAX_BSIZE);
hashp->BSHIFT = __log2(hashp->BSIZE);
}
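
Taken together, the two hash.c changes above (a) clamp the bucket size chosen
from the filesystem block size to MAX_BSIZE when creating a new table, and
(b) reject an existing table whose on-disk bucket size cannot be represented
in the target's int. A hedged sketch of that logic in isolation, with
hypothetical helper names (the real code stores the value in hashp->BSIZE and
fails with EFTYPE via RETURN_ERROR):

#include <limits.h>
#include <stdint.h>

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define MAX_BSIZE 4096          /* reduced limit assumed for a 16-bit target */

/* New database: take the filesystem block size, capped at MAX_BSIZE. */
int32_t pick_bucket_size(long st_blksize)
{
    return (int32_t)MIN(st_blksize, MAX_BSIZE);
}

/* Existing database: mirror the new check -- the 32-bit value read from disk
 * must fit in an int.  The comparison only bites where int is narrower than
 * 32 bits. */
int bucket_size_ok(int32_t bsize_on_disk)
{
    return bsize_on_disk <= INT_MAX;
}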
diff --git a/newlib/libc/search/hash.h b/newlib/libc/search/hash.h
index 6491814..1b094d6 100644
--- a/newlib/libc/search/hash.h
+++ b/newlib/libc/search/hash.h
@@ -40,6 +40,7 @@
#include <sys/param.h>
#define __need_size_t
#include <stddef.h>
+#include <stdint.h>
/* Check that newlib understands the byte order of its target system. */
#ifndef BYTE_ORDER
@@ -82,28 +83,28 @@ typedef BUFHEAD **SEGMENT;
/* Hash Table Information */
typedef struct hashhdr { /* Disk resident portion */
- int magic; /* Magic NO for hash tables */
- int version; /* Version ID */
+ int32_t magic; /* Magic NO for hash tables */
+ int32_t version; /* Version ID */
__uint32_t lorder; /* Byte Order */
- int bsize; /* Bucket/Page Size */
- int bshift; /* Bucket shift */
- int dsize; /* Directory Size */
- int ssize; /* Segment Size */
- int sshift; /* Segment shift */
- int ovfl_point; /* Where overflow pages are being
+ int32_t bsize; /* Bucket/Page Size */
+ int32_t bshift; /* Bucket shift */
+ int32_t dsize; /* Directory Size */
+ int32_t ssize; /* Segment Size */
+ int32_t sshift; /* Segment shift */
+ int32_t ovfl_point; /* Where overflow pages are being
* allocated */
- int last_freed; /* Last overflow page freed */
- int max_bucket; /* ID of Maximum bucket in use */
- int high_mask; /* Mask to modulo into entire table */
- int low_mask; /* Mask to modulo into lower half of
+ int32_t last_freed; /* Last overflow page freed */
+ int32_t max_bucket; /* ID of Maximum bucket in use */
+ int32_t high_mask; /* Mask to modulo into entire table */
+ int32_t low_mask; /* Mask to modulo into lower half of
* table */
- int ffactor; /* Fill factor */
- int nkeys; /* Number of keys in hash table */
- int hdrpages; /* Size of table header */
- int h_charkey; /* value of hash(CHARKEY) */
+ int32_t ffactor; /* Fill factor */
+ int32_t nkeys; /* Number of keys in hash table */
+ int32_t hdrpages; /* Size of table header */
+ int32_t h_charkey; /* value of hash(CHARKEY) */
#define NCACHED 32 /* number of bit maps and spare
* points */
- int spares[NCACHED];/* spare pages for overflow */
+ int32_t spares[NCACHED];/* spare pages for overflow */
__uint16_t bitmaps[NCACHED]; /* address of overflow page
* bitmaps */
} HASHHDR;
@@ -120,7 +121,7 @@ typedef struct htab { /* Memory resident data structure */
char *tmp_buf; /* Temporary Buffer for BIG data */
char *tmp_key; /* Temporary Buffer for BIG keys */
BUFHEAD *cpage; /* Current page */
- int cbucket; /* Current bucket */
+ int32_t cbucket; /* Current bucket */
int cndx; /* Index of next item on cpage */
int error; /* Error Number -- for DBM
* compatibility */
@@ -140,10 +141,18 @@ typedef struct htab { /* Memory resident data structure */
/*
* Constants
*/
+#if INT_MAX == 32767
+#define MAX_BSIZE 4096
+#else
#define MAX_BSIZE 65536 /* 2^16 */
+#endif
#define MIN_BUFFERS 6
#define MINHDRSIZE 512
+#if INT_MAX == 32767
+#define DEF_BUFSIZE 4096
+#else
#define DEF_BUFSIZE 65536 /* 64 K */
+#endif
#define DEF_BUCKET_SIZE 4096
#define DEF_BUCKET_SHIFT 12 /* log2(BUCKET) */
#define DEF_SEGSIZE 256
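
The constants hunk pairs with the struct change: when INT_MAX == 32767 (a
16-bit int target) MAX_BSIZE and DEF_BUFSIZE drop from 65536 to 4096, since a
64 KiB bucket or buffer pool cannot even be described by a 16-bit int or
size_t, let alone allocated. A small sketch of why the smaller value matters,
assuming the conditional definition above and a hypothetical helper name:

#include <limits.h>
#include <stdlib.h>

#if INT_MAX == 32767
#define DEF_BUFSIZE 4096        /* fits comfortably in a 16-bit address space */
#else
#define DEF_BUFSIZE 65536       /* 64 K */
#endif

/* Hypothetical helper: allocate the default buffer pool.  With a 16-bit
 * size_t, a constant of 65536 would wrap to 0 when converted, so the request
 * must stay below 2^16 on such targets. */
void *alloc_default_buffer_pool(void)
{
    return malloc((size_t)DEF_BUFSIZE);
}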
diff --git a/newlib/libc/search/hash_buf.c b/newlib/libc/search/hash_buf.c
index d50fc57..81475e9 100644
--- a/newlib/libc/search/hash_buf.c
+++ b/newlib/libc/search/hash_buf.c
@@ -151,7 +151,7 @@ __get_buf(hashp, addr, prev_bp, newpage)
return (NULL);
if (!prev_bp)
segp[segment_ndx] =
- (BUFHEAD *)((ptrdiff_t)bp | is_disk_mask);
+ (BUFHEAD *)((ptrdiff_t)bp | (intptr_t)is_disk_mask);
} else {
BUF_REMOVE(bp);
MRU_INSERT(bp);
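
The one-line hash_buf.c change casts the flag mask to a pointer-sized integer
before OR-ing it into the pointer value, so the arithmetic is carried out at
pointer width and converting the result back to BUFHEAD * no longer triggers
the overflow warning on 16-bit targets. A generic, hypothetical sketch of the
same low-bit pointer-tagging idiom (not newlib's actual macros), using
uintptr_t for the portable unsigned form:

#include <stdint.h>

#define DISK_TAG ((uintptr_t)0x2)   /* assumed tag bit; alignment keeps it free */

/* Record the "backed by disk" flag in a spare low bit of the pointer. */
static inline void *tag_disk(void *bp)
{
    return (void *)((uintptr_t)bp | DISK_TAG);
}

/* Recover the real pointer. */
static inline void *ptr_of(void *tagged)
{
    return (void *)((uintptr_t)tagged & ~DISK_TAG);
}

/* Test the flag. */
static inline int is_disk(void *tagged)
{
    return ((uintptr_t)tagged & DISK_TAG) != 0;
}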