author     Corinna Vinschen <corinna@vinschen.de>    2018-03-18 18:46:15 +0100
committer  Corinna Vinschen <corinna@vinschen.de>    2018-03-18 18:46:15 +0100
commit     60ca1c1359e4b5b133a685623c99e14cc44507f0 (patch)
tree       2213cae99c70083c52fbcd351c9043508d6dbfe8 /winsup/cygwin
parent     1f41bc16f1f07768702010c5a2f7c1a64d0ce7fc (diff)
Cygwin: AF_UNIX: Use spinlock rather than SRWLOCKs
We need to share socket info between threads *and* processes.  SRWLOCKs are
single-process only, unfortunately.  Provide a sharable low-profile spinlock
instead.

Signed-off-by: Corinna Vinschen <corinna@vinschen.de>
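Why this works: a spinlock's entire state is a single LONG, so any memory both
parties can see (here, the shared af_unix_shmem_t region) is enough to back it,
whereas an SRWLOCK is documented to work only within one process.  The
following is a minimal standalone sketch, not taken from this patch, of the
same idea: two unrelated processes map the same named pagefile-backed section
(the name "Local\spinlock-demo" and the surrounding scaffolding are invented
for the example) and contend on the LONG inside it with InterlockedExchange.

#include <windows.h>
#include <stdio.h>

int
main ()
{
  /* The first process to run creates the zero-initialized mapping (lock
     free); later processes just open the same section by name. */
  HANDLE h = CreateFileMappingW (INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
                                 0, sizeof (LONG), L"Local\\spinlock-demo");
  if (!h)
    return 1;
  volatile LONG *lock = (volatile LONG *)
    MapViewOfFile (h, FILE_MAP_ALL_ACCESS, 0, 0, sizeof (LONG));
  if (!lock)
    return 1;

  /* Lock: loop until the previous value was 0, i.e. we flipped 0 -> 1. */
  while (InterlockedExchange (lock, 1) != 0)
    Sleep (1);

  printf ("process %lu holds the lock\n", GetCurrentProcessId ());
  Sleep (2000);                 /* stand-in for touching shared socket info */

  /* Unlock: store 0 so the other process's exchange can succeed. */
  InterlockedExchange (lock, 0);

  UnmapViewOfFile ((LPCVOID) lock);
  CloseHandle (h);
  return 0;
}

Started twice, the second instance blocks in the while loop until the first
one stores 0; the same handful of instructions is what lock () and unlock ()
in the diff below boil down to, minus the Sleep backoff.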
Diffstat (limited to 'winsup/cygwin')
-rw-r--r--  winsup/cygwin/fhandler.h |  56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/winsup/cygwin/fhandler.h b/winsup/cygwin/fhandler.h
index 2e50208..0b04d7a 100644
--- a/winsup/cygwin/fhandler.h
+++ b/winsup/cygwin/fhandler.h
@@ -825,6 +825,32 @@ class fhandler_socket_local: public fhandler_socket_wsock
}
};
+/* Sharable spinlock with low CPU profile. These locks are NOT recursive! */
+class af_unix_spinlock_t
+{
+  LONG locked;          /* 0 or 1 */
+
+public:
+  void lock ()
+  {
+    LONG ret = InterlockedExchange (&locked, 1);
+    if (ret)
+      {
+        /* This loop steps the Sleep duration up from 0 to 45 ms in 15 ms
+           steps, staying at each duration for 256 iterations. */
+        for (uint16_t i = 0; ret; i += 64)
+          {
+            Sleep (15 * (i >> 14));
+            ret = InterlockedExchange (&locked, 1);
+          }
+      }
+  }
+  void unlock ()
+  {
+    InterlockedExchange (&locked, 0);
+  }
+};
+
class sun_name_t
{
public:
@@ -855,9 +881,13 @@ enum shut_state {
in socket, socketpair or accept4 and reopened by dup, fork or exec. */
class af_unix_shmem_t
{
- SRWLOCK _bind_lock;
- SRWLOCK _conn_lock;
- SRWLOCK _io_lock;
+ /* Don't use SRWLOCKs here. They are not sharable. If you must lock
+ multiple locks at the same time, always lock in the order bind ->
+ conn -> state -> io and unlock io -> state -> conn -> bind to avoid
+ deadlocks. */
+ af_unix_spinlock_t _bind_lock;
+ af_unix_spinlock_t _conn_lock;
+ af_unix_spinlock_t _io_lock;
LONG _connection_state; /* conn_state */
LONG _binding_state; /* bind_state */
LONG _shutdown; /* shut_state */
@@ -865,20 +895,12 @@ class af_unix_shmem_t
LONG _reuseaddr; /* dummy */
public:
- af_unix_shmem_t ()
- : _connection_state (unconnected), _binding_state (unbound),
- _shutdown (0), _so_error (0)
- {
- InitializeSRWLock (&_bind_lock);
- InitializeSRWLock (&_conn_lock);
- InitializeSRWLock (&_io_lock);
- }
- void bind_lock () { AcquireSRWLockExclusive (&_bind_lock); }
- void bind_unlock () { ReleaseSRWLockExclusive (&_bind_lock); }
- void conn_lock () { AcquireSRWLockExclusive (&_conn_lock); }
- void conn_unlock () { ReleaseSRWLockExclusive (&_conn_lock); }
- void io_lock () { AcquireSRWLockExclusive (&_io_lock); }
- void io_unlock () { ReleaseSRWLockExclusive (&_io_lock); }
+ void bind_lock () { _bind_lock.lock (); }
+ void bind_unlock () { _bind_lock.unlock (); }
+ void conn_lock () { _conn_lock.lock (); }
+ void conn_unlock () { _conn_lock.unlock (); }
+ void io_lock () { _io_lock.lock (); }
+ void io_unlock () { _io_lock.unlock (); }
conn_state connect_state (conn_state val)
{ return (conn_state) InterlockedExchange (&_connection_state, val); }
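A note on the backoff in lock () above: i is a uint16_t stepped by 64 per
iteration, so i >> 14 advances every 16384 / 64 = 256 iterations.  Sleep
(15 * (i >> 14)) therefore runs 256 iterations each at 0, 15, 30 and 45 ms,
after which the counter wraps and the schedule repeats.  A small standalone
sketch (not part of the patch) that just tallies this schedule:

#include <stdint.h>
#include <stdio.h>

int
main ()
{
  unsigned per_step[4] = { 0, 0, 0, 0 };        /* iterations per duration */

  /* Mirror the counter in af_unix_spinlock_t::lock (): a uint16_t stepped
     by 64 wraps after 65536 / 64 == 1024 iterations. */
  for (unsigned n = 0; n < 1024; n++)
    {
      uint16_t i = (uint16_t) (n * 64);
      per_step[i >> 14]++;
    }

  for (unsigned s = 0; s < 4; s++)
    printf ("%u iterations sleeping %u ms\n", per_step[s], 15 * s);
  return 0;
}

The net effect is a short burst of Sleep (0) retries followed by progressively
longer naps, which keeps waiters from burning CPU, matching the "low CPU
profile" wording in the comment above the class.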