path: root/block/plug.c
blob: 98a155d2f425343fbb400d30da8d3f7bed91b3a1
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Block I/O plugging
 *
 * Copyright Red Hat.
 *
 * This API defers a function call within a blk_io_plug()/blk_io_unplug()
 * section, allowing multiple calls to batch up. This is a performance
 * optimization that is used in the block layer to submit several I/O requests
 * at once instead of individually:
 *
 *   blk_io_plug(); <-- start of plugged region
 *   ...
 *   blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
 *   blk_io_plug_call(my_func, my_obj); <-- another
 *   blk_io_plug_call(my_func, my_obj); <-- another
 *   ...
 *   blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
 *
 * This code is actually generic and not tied to the block layer. If another
 * subsystem needs this functionality, it could be renamed.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine-tls.h"
#include "qemu/notify.h"
#include "qemu/thread.h"
#include "sysemu/block-backend.h"

/* A function call that has been deferred until unplug() */
typedef struct {
    void (*fn)(void *);
    void *opaque;
} UnplugFn;

/* Per-thread state */
typedef struct {
    unsigned count;       /* how many times has plug() been called? */
    GArray *unplug_fns;   /* functions to call at unplug time */
} Plug;

/* Use get_ptr_plug() to fetch this thread-local value */
QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);

/* Called at thread cleanup time */
static void blk_io_plug_atexit(Notifier *n, void *value)
{
    Plug *plug = get_ptr_plug();
    g_array_free(plug->unplug_fns, TRUE);
}

/* This won't involve coroutines, so use __thread */
static __thread Notifier blk_io_plug_atexit_notifier;

/**
 * blk_io_plug_call:
 * @fn: a function pointer to be invoked
 * @opaque: a user-defined argument to @fn()
 *
 * Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
 * section.
 *
 * Otherwise defer the call until the end of the outermost
 * blk_io_plug()/blk_io_unplug() section in this thread. If the same
 * @fn/@opaque pair has already been deferred, it will only be called once upon
 * blk_io_unplug() so that accumulated calls are batched into a single call.
 *
 * The caller must ensure that @opaque is not freed before @fn() is invoked.
 */
void blk_io_plug_call(void (*fn)(void *), void *opaque)
{
    Plug *plug = get_ptr_plug();

    /* Call immediately if we're not plugged */
    if (plug->count == 0) {
        fn(opaque);
        return;
    }

    GArray *array = plug->unplug_fns;
    if (!array) {
        array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
        plug->unplug_fns = array;
        blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
        qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
    }

    UnplugFn *fns = (UnplugFn *)array->data;
    UnplugFn new_fn = {
        .fn = fn,
        .opaque = opaque,
    };

    /*
     * There won't be many, so do a linear search. If this becomes a bottleneck
     * then a binary search (glib 2.62+) or different data structure could be
     * used.
     */
    for (guint i = 0; i < array->len; i++) {
        if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
            return; /* already exists */
        }
    }

    g_array_append_val(array, new_fn);
}
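
/*
 * Usage sketch (hypothetical driver code, not part of this file): a block
 * driver can route its device notification through blk_io_plug_call() so
 * that several requests queued within one plugged region notify the device
 * only once. The my_driver_*, MyQueue, MyRequest, enqueue and ring_doorbell
 * names below are illustrative, not existing QEMU APIs:
 *
 *   static void my_driver_kick(void *opaque)
 *   {
 *       MyQueue *q = opaque;
 *
 *       ring_doorbell(q); <-- executed once per plugged region
 *   }
 *
 *   static void my_driver_submit(MyQueue *q, MyRequest *req)
 *   {
 *       enqueue(q, req);
 *       blk_io_plug_call(my_driver_kick, q); <-- deduplicated by fn/opaque
 *   }
 */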

/**
 * blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
 *
 * blk_io_plug/unplug are thread-local operations. This means that multiple
 * threads can simultaneously call plug/unplug, but the caller must ensure that
 * each unplug() is called in the same thread of the matching plug().
 *
 * Nesting is supported. blk_io_plug_call() functions are only called at the
 * outermost blk_io_unplug().
 */
void blk_io_plug(void)
{
    Plug *plug = get_ptr_plug();

    assert(plug->count < UINT32_MAX);

    plug->count++;
}
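
/*
 * Nesting sketch (hypothetical caller, not part of this file): because
 * deferred functions only run at the outermost blk_io_unplug(), a helper can
 * plug defensively without knowing whether its caller is already plugged:
 *
 *   blk_io_plug(); <-- outer region, count becomes 1
 *   ...
 *   blk_io_plug(); <-- nested region inside a helper, count becomes 2
 *   blk_io_plug_call(my_func, my_obj);
 *   blk_io_unplug(); <-- count drops to 1, nothing runs yet
 *   ...
 *   blk_io_unplug(); <-- count drops to 0, my_func(my_obj) runs here
 */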

/**
 * blk_io_unplug: Run any pending blk_io_plug_call() functions
 *
 * There must have been a matching blk_io_plug() call in the same thread prior
 * to this blk_io_unplug() call.
 */
void blk_io_unplug(void)
{
    Plug *plug = get_ptr_plug();

    assert(plug->count > 0);

    if (--plug->count > 0) {
        return;
    }

    GArray *array = plug->unplug_fns;
    if (!array) {
        return;
    }

    UnplugFn *fns = (UnplugFn *)array->data;

    for (guint i = 0; i < array->len; i++) {
        fns[i].fn(fns[i].opaque);
    }

    /*
     * This resets the array without freeing memory so that appending is cheap
     * in the future.
     */
    g_array_set_size(array, 0);
}