about summary refs log tree commit diff
path: root/block/nbd.c
diff options
context:
space:
mode:
author: Paolo Bonzini <pbonzini@redhat.com> 2022-04-14 19:57:54 +0200
committer: Eric Blake <eblake@redhat.com> 2022-04-26 13:16:39 -0500
commit: dba5156c0e9c0362b7c6121f9e1c89bb9be1f227 (patch)
tree: 926943dbe6f7b1248c10b30cc3ae0d77a7a58adf /block/nbd.c
parent: 8d45185cb76fa1dd7c3309940a967dc42d8619d4 (diff)
downloadqemu-dba5156c0e9c0362b7c6121f9e1c89bb9be1f227.zip
qemu-dba5156c0e9c0362b7c6121f9e1c89bb9be1f227.tar.gz
qemu-dba5156c0e9c0362b7c6121f9e1c89bb9be1f227.tar.bz2
nbd: move s->state under requests_lock
Remove the confusing, and most likely wrong, atomics. The only function that used to be somewhat in a hot path was nbd_client_connected(), but it is not anymore after the previous patches.

The same logic is used both to check if a request had to be reissued and also in nbd_reconnect_attempt(). The former check runs outside requests_lock, while nbd_reconnect_attempt() holds the lock, therefore the two were separated in the previous commit. nbd_client_will_reconnect() can simply take s->requests_lock, while nbd_reconnect_attempt() can inline the access now that no complicated atomics are involved.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220414175756.671165-8-pbonzini@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@openvz.org>
Reviewed-by: Lukas Straub <lukasstraub2@web.de>
Signed-off-by: Eric Blake <eblake@redhat.com>
Diffstat (limited to 'block/nbd.c')
-rw-r--r--block/nbd.c76
1 file changed, 40 insertions, 36 deletions
diff --git a/block/nbd.c b/block/nbd.c
index 3cba874..a5c690c 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -35,7 +35,6 @@
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
-#include "qemu/atomic.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
@@ -72,10 +71,11 @@ typedef struct BDRVNBDState {
NBDExportInfo info;
/*
- * Protects free_sema, in_flight, requests[].coroutine,
+ * Protects state, free_sema, in_flight, requests[].coroutine,
* reconnect_delay_timer.
*/
QemuMutex requests_lock;
+ NBDClientState state;
CoQueue free_sema;
int in_flight;
NBDClientRequest requests[MAX_NBD_REQUESTS];
@@ -83,7 +83,6 @@ typedef struct BDRVNBDState {
CoMutex send_mutex;
CoMutex receive_mutex;
- NBDClientState state;
QEMUTimer *open_timer;
@@ -132,11 +131,6 @@ static void nbd_clear_bdrvstate(BlockDriverState *bs)
s->x_dirty_bitmap = NULL;
}
-static bool nbd_client_connected(BDRVNBDState *s)
-{
- return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
-}
-
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
if (req->receiving) {
@@ -159,14 +153,15 @@ static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
}
}
-static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
+/* Called with s->requests_lock held. */
+static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
- if (nbd_client_connected(s)) {
+ if (s->state == NBD_CLIENT_CONNECTED) {
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
if (ret == -EIO) {
- if (nbd_client_connected(s)) {
+ if (s->state == NBD_CLIENT_CONNECTED) {
s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
NBD_CLIENT_CONNECTING_NOWAIT;
}
@@ -177,6 +172,12 @@ static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
nbd_recv_coroutines_wake(s, true);
}
+static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
+{
+ QEMU_LOCK_GUARD(&s->requests_lock);
+ nbd_channel_error_locked(s, ret);
+}
+
static void reconnect_delay_timer_del(BDRVNBDState *s)
{
if (s->reconnect_delay_timer) {
@@ -189,20 +190,18 @@ static void reconnect_delay_timer_cb(void *opaque)
{
BDRVNBDState *s = opaque;
- if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
+ reconnect_delay_timer_del(s);
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
+ return;
+ }
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
- nbd_co_establish_connection_cancel(s->conn);
}
-
- reconnect_delay_timer_del(s);
+ nbd_co_establish_connection_cancel(s->conn);
}
static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
- if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
- return;
- }
-
assert(!s->reconnect_delay_timer);
s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
QEMU_CLOCK_REALTIME,
@@ -225,7 +224,9 @@ static void nbd_teardown_connection(BlockDriverState *bs)
s->ioc = NULL;
}
- s->state = NBD_CLIENT_QUIT;
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ s->state = NBD_CLIENT_QUIT;
+ }
}
static void open_timer_del(BDRVNBDState *s)
@@ -254,15 +255,15 @@ static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
timer_mod(s->open_timer, expire_time_ns);
}
-static bool nbd_client_connecting_wait(BDRVNBDState *s)
-{
- return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
-}
-
static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
- return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
+ /*
+ * Called only after a socket error, so this is not performance sensitive.
+ */
+ QEMU_LOCK_GUARD(&s->requests_lock);
+ return s->state == NBD_CLIENT_CONNECTING_WAIT;
}
+
/*
* Update @bs with information learned during a completed negotiation process.
* Return failure if the server's advertised options are incompatible with the
@@ -347,22 +348,24 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));
/* successfully connected */
- s->state = NBD_CLIENT_CONNECTED;
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ s->state = NBD_CLIENT_CONNECTED;
+ }
return 0;
}
+/* Called with s->requests_lock held. */
static bool nbd_client_connecting(BDRVNBDState *s)
{
- NBDClientState state = qatomic_load_acquire(&s->state);
- return state == NBD_CLIENT_CONNECTING_WAIT ||
- state == NBD_CLIENT_CONNECTING_NOWAIT;
+ return s->state == NBD_CLIENT_CONNECTING_WAIT ||
+ s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}
/* Called with s->requests_lock taken. */
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
- bool blocking = nbd_client_connecting_wait(s);
+ bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;
/*
* Now we are sure that nobody is accessing the channel, and no one will
@@ -477,17 +480,17 @@ static int coroutine_fn nbd_co_send_request(BlockDriverState *bs,
qemu_mutex_lock(&s->requests_lock);
while (s->in_flight == MAX_NBD_REQUESTS ||
- (!nbd_client_connected(s) && s->in_flight > 0)) {
+ (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
}
s->in_flight++;
- if (!nbd_client_connected(s)) {
+ if (s->state != NBD_CLIENT_CONNECTED) {
if (nbd_client_connecting(s)) {
nbd_reconnect_attempt(s);
qemu_co_queue_restart_all(&s->free_sema);
}
- if (!nbd_client_connected(s)) {
+ if (s->state != NBD_CLIENT_CONNECTED) {
rc = -EIO;
goto err;
}
@@ -530,7 +533,7 @@ static int coroutine_fn nbd_co_send_request(BlockDriverState *bs,
if (rc < 0) {
qemu_mutex_lock(&s->requests_lock);
err:
- nbd_channel_error(s, rc);
+ nbd_channel_error_locked(s, rc);
if (i != -1) {
s->requests[i].coroutine = NULL;
}
@@ -1443,8 +1446,9 @@ static void nbd_yank(void *opaque)
BlockDriverState *bs = opaque;
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
- qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
+ QEMU_LOCK_GUARD(&s->requests_lock);
qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+ s->state = NBD_CLIENT_QUIT;
}
static void nbd_client_close(BlockDriverState *bs)