path: root/util/fdmon-poll.c
blob: 17df917cf962a9f73000c970ec616f229c4e4f9a
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * poll(2) file descriptor monitoring
 *
 * Uses ppoll(2) when available, g_poll() otherwise.
 */

#include "qemu/osdep.h"
#include "aio-posix.h"
#include "qemu/rcu_queue.h"

/*
 * These thread-local variables are used only in fdmon_poll_wait() around the
 * call to the poll() system call.  In particular they are not used while
 * aio_poll is performing callbacks, which makes it much easier to think about
 * reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

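/* Free the thread-local arrays when the thread exits (registered in add_pollfd()) */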
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

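/*
 * Append one handler to the thread-local pollfds[]/nodes[] arrays, growing
 * them geometrically on demand.  The thread-exit cleanup notifier is
 * registered lazily on first use.
 */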
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

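/*
 * Build pollfds[] from the registered handlers, wait for events with
 * qemu_poll_ns(), and move handlers with pending events onto ready_list.
 * If enough fds are being monitored, hand over to the epoll implementation
 * instead.
 */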
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
                           int64_t timeout)
{
    AioHandler *node;
    int ret;

    assert(npfd == 0);

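    /* Collect fds from handlers that are still registered and want events */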
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
            add_pollfd(node);
        }
    }

    /* epoll(7) is faster above a certain number of fds */
    if (fdmon_epoll_try_upgrade(ctx, npfd)) {
        npfd = 0; /* we won't need pollfds[], reset npfd */
        return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
    }

    ret = qemu_poll_ns(pollfds, npfd, timeout);
    if (ret > 0) {
        int i;

        for (i = 0; i < npfd; i++) {
            int revents = pollfds[i].revents;

            if (revents) {
                aio_add_ready_handler(ready_list, nodes[i], revents);
            }
        }
    }

    npfd = 0;
    return ret;
}

static void fdmon_poll_update(AioContext *ctx,
                              AioHandler *old_node,
                              AioHandler *new_node)
{
    /* Do nothing, AioHandler already contains the state we'll need */
}

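/* poll(2)-based fd monitoring operations */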
const FDMonOps fdmon_poll_ops = {
    .update = fdmon_poll_update,
    .wait = fdmon_poll_wait,
    .need_wait = aio_poll_disabled,
};