From 0b1fa2880c60dcb97d15e5d96e5640f3672dae48 Mon Sep 17 00:00:00 2001
From: Stefan Teleman
Date: Fri, 20 Nov 2015 15:28:40 +0000
Subject: Fix a problem with the maximum number of open files held in the
 cache when running on a 32-bit Solaris host.

	PR ld/19260
	* cache.c (bfd_cache_max_open): Avoid using getrlimit on 32-bit
	Solaris as the result is unreliable.
---
 bfd/cache.c | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

(limited to 'bfd/cache.c')

diff --git a/bfd/cache.c b/bfd/cache.c
index 94a82da..8c9a238 100644
--- a/bfd/cache.c
+++ b/bfd/cache.c
@@ -78,18 +78,36 @@ bfd_cache_max_open (void)
   if (max_open_files == 0)
     {
       int max;
+#if defined(__sun) && !defined(__sparcv9) && !defined(__x86_64__)
+      /* PR ld/19260: 32-bit Solaris has very inelegant handling of the 255
+	 file descriptor limit.  The problem is that setrlimit(2) can raise
+	 RLIMIT_NOFILE to a value that is not supported by libc, resulting
+	 in "Too many open files" errors.  This can happen here even though
+	 max_open_files is set to rlim.rlim_cur / 8.  For example, if
+	 a parent process has set rlim.rlim_cur to 65536, then max_open_files
+	 will be computed as 8192.
+
+	 This check essentially reverts to the behavior from binutils 2.23.1
+	 for 32-bit Solaris only.  (It is hoped that the 32-bit libc
+	 limitation will be removed soon).  64-bit Solaris libc does not have
+	 this limitation.  */
+      max = 16;
+#else
 #ifdef HAVE_GETRLIMIT
       struct rlimit rlim;
+
       if (getrlimit (RLIMIT_NOFILE, &rlim) == 0
 	  && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
 	max = rlim.rlim_cur / 8;
       else
-#endif /* HAVE_GETRLIMIT */
+#endif
 #ifdef _SC_OPEN_MAX
 	max = sysconf (_SC_OPEN_MAX) / 8;
 #else
-      max = 10;
-#endif /* _SC_OPEN_MAX */
+	max = 10;
+#endif
+#endif /* not 32-bit Solaris */
+
       max_open_files = max < 10 ? 10 : max;
     }
--
cgit v1.1
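
Note (not part of the commit): the sketch below mirrors the post-patch limit
computation in bfd_cache_max_open as a standalone program, for readers who want
to see what BFD will compute on their host.  The helper name
compute_max_open_files is invented here, and the sketch assumes a POSIX host
where getrlimit and _SC_OPEN_MAX are both available, so it omits the
HAVE_GETRLIMIT and _SC_OPEN_MAX fallbacks that the real code keeps.

/* Standalone sketch of the cache-limit computation after this patch.  */

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

static int
compute_max_open_files (void)
{
  int max;
#if defined(__sun) && !defined(__sparcv9) && !defined(__x86_64__)
  /* 32-bit Solaris: skip getrlimit entirely and use a small fixed limit,
     as the patch does, because libc may not honor a raised RLIMIT_NOFILE.  */
  max = 16;
#else
  struct rlimit rlim;

  if (getrlimit (RLIMIT_NOFILE, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
    max = rlim.rlim_cur / 8;        /* e.g. rlim_cur == 65536 gives 8192.  */
  else
    max = sysconf (_SC_OPEN_MAX) / 8;
#endif
  /* Never cache fewer than 10 descriptors.  */
  return max < 10 ? 10 : max;
}

int
main (void)
{
  printf ("BFD would keep at most %d files open\n", compute_max_open_files ());
  return 0;
}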