author     Joel Brobecker <brobecker@gnat.com>    2008-05-01 15:45:43 +0000
committer  Joel Brobecker <brobecker@gnat.com>    2008-05-01 15:45:43 +0000
commit     f12a02c018093a098f27099297903362c47ca150 (patch)
tree       cfd94abb511b94f8bd2cbb3273fec6d49b26038a /bfd/cache.c
parent     961c521ff952676089e85d64d712fcd3d192e4e0 (diff)
* cache.c (cache_bread_1): Renames cache_bread.
(cache_bread): New function.
Diffstat (limited to 'bfd/cache.c')
-rw-r--r--  bfd/cache.c  39
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/bfd/cache.c b/bfd/cache.c
index eb6120d..3906335 100644
--- a/bfd/cache.c
+++ b/bfd/cache.c
@@ -250,7 +250,7 @@ cache_bseek (struct bfd *abfd, file_ptr offset, int whence)
first octet in the file, NOT the beginning of the archive header. */
static file_ptr
-cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
+cache_bread_1 (struct bfd *abfd, void *buf, file_ptr nbytes)
{
FILE *f;
file_ptr nread;
@@ -301,6 +301,43 @@ cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
}
static file_ptr
+cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
+{
+ file_ptr nread = 0;
+
+ /* Some filesystems are unable to handle reads that are too large
+ (for instance, NetApp shares with oplocks turned off). To avoid
+ hitting this limitation, we read the buffer in chunks of 8MB max. */
+ while (nread < nbytes)
+ {
+ const file_ptr max_chunk_size = 0x800000;
+ file_ptr chunk_size = nbytes - nread;
+ file_ptr chunk_nread;
+
+ if (chunk_size > max_chunk_size)
+ chunk_size = max_chunk_size;
+
+ chunk_nread = cache_bread_1 (abfd, buf + nread, chunk_size);
+
+ /* Update the nread count.
+
+ We just have to be careful of the case when cache_bread_1 returns
+ a negative count: If this is our first read, then set nread to
+ that negative count in order to return that negative value to the
+ caller. Otherwise, don't add it to our total count, or we would
+ end up returning a smaller number of bytes read than we actually
+ did. */
+ if (nread == 0 || chunk_nread > 0)
+ nread += chunk_nread;
+
+ if (chunk_nread < chunk_size)
+ break;
+ }
+
+ return nread;
+}
+
+static file_ptr
cache_bwrite (struct bfd *abfd, const void *where, file_ptr nbytes)
{
file_ptr nwrite;
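
The loop added by this patch caps each underlying read at 8MB and stops as soon
as cache_bread_1 returns fewer bytes than requested, or a negative count on the
very first call. Below is a minimal, self-contained sketch of the same
bounded-chunk pattern applied to a plain POSIX read(2) loop, for illustration
only: the helper name read_in_chunks and the use of read() are assumptions, not
part of the patch, while the 8MB cap, the short-read check, and the handling of
a negative first return mirror the comments in the new cache_bread above.

#include <sys/types.h>
#include <unistd.h>

/* Illustration only (hypothetical helper, not part of the patch): read
   NBYTES from FD into BUF using system calls of at most 8MB each,
   mirroring the chunking logic that cache_bread wraps around
   cache_bread_1.  */

static ssize_t
read_in_chunks (int fd, void *buf, size_t nbytes)
{
  size_t nread = 0;

  while (nread < nbytes)
    {
      const size_t max_chunk_size = 0x800000;  /* 8MB per read, max.  */
      size_t chunk_size = nbytes - nread;
      ssize_t chunk_nread;

      if (chunk_size > max_chunk_size)
        chunk_size = max_chunk_size;

      chunk_nread = read (fd, (char *) buf + nread, chunk_size);

      /* On error, report it only if nothing has been read yet;
         otherwise return the bytes already transferred, which is the
         same policy cache_bread applies to a negative count.  */
      if (chunk_nread < 0)
        return nread == 0 ? chunk_nread : (ssize_t) nread;

      nread += chunk_nread;

      /* A short read means end-of-file (or no more data available),
         so stop rather than looping again.  */
      if ((size_t) chunk_nread < chunk_size)
        break;
    }

  return (ssize_t) nread;
}

In the patch itself the same bookkeeping is done in place on nread, so a
negative count from the first cache_bread_1 call is what gets returned to the
caller, and later negative counts are simply not added to the running total.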