#ifndef VHOST_H
#define VHOST_H

#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"

#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30

/* Generic structures common for any vhost based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    EventNotifier error_notifier;
    EventNotifier masked_config_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT   (0xff)
#define VHOST_QUEUE_NUM_CONFIG_INR 0
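
/*
 * Each bit in the dirty log covers one VHOST_LOG_PAGE of guest memory,
 * so a single vhost_log_chunk_t covers VHOST_LOG_CHUNK bytes (256 KiB
 * with 64-bit longs). A rough sketch of the resulting sizing
 * arithmetic; the rounding convention shown here is an assumption, the
 * core vhost code derives the real size from the memory regions it
 * tracks:
 *
 *     uint64_t chunks = last_guest_phys_addr / VHOST_LOG_CHUNK + 1;
 *     uint64_t log_bytes = chunks * sizeof(vhost_log_chunk_t);
 */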

struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;

struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    /**
     * vhost feature handling requires matching the feature set
     * offered by a backend which may be a subset of the total
     * features eventually offered to the guest.
     *
     * @features: available features provided by the backend
     * @acked_features: final negotiated features with front-end driver
     *
     * @backend_features: used in a couple of places to either store
     * VHOST_USER_F_PROTOCOL_FEATURES (to apply to
     * VHOST_USER_SET_FEATURES) or VHOST_NET_F_VIRTIO_NET_HDR. Its
     * future use should be discouraged and the variable retired, as
     * it's easy to confuse with the VirtIO backend_features.
     */
    uint64_t features;
    uint64_t acked_features;
    uint64_t backend_features;

    /**
     * @protocol_features: the vhost-user-only feature set negotiated
     * via VHOST_USER_SET_PROTOCOL_FEATURES. Protocol features are only
     * negotiated if VHOST_USER_F_PROTOCOL_FEATURES has been offered
     * by the backend (see @features).
     */
    uint64_t protocol_features;

    uint64_t max_queues;
    uint64_t backend_cap;
    /* @started: is the vhost device started? */
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for polling virtqueue
 * @errp: error handle
 *
 * Initialising the vhost device triggers initialisation of the
 * backend and, potentially, capability negotiation of the backend
 * interface. Configuration of the VirtIO device itself won't happen
 * until the interface is started.
 *
 * Return: 0 on success, non-zero on error while setting @errp.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);
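
/*
 * Illustrative init sketch, not lifted from any in-tree caller: the
 * vhost-user backend, the VhostUserState at s->vhost_user, the queue
 * count and the error handling are all assumptions. Note that @nvqs
 * and @vqs must be filled in before calling vhost_dev_init().
 *
 *     Error *local_err = NULL;
 *     struct vhost_dev *hdev = g_new0(struct vhost_dev, 1);
 *
 *     hdev->nvqs = 2;
 *     hdev->vqs = g_new0(struct vhost_virtqueue, hdev->nvqs);
 *     if (vhost_dev_init(hdev, &s->vhost_user, VHOST_BACKEND_TYPE_USER,
 *                        0, &local_err) < 0) {
 *         error_report_err(local_err);
 *         g_free(hdev->vqs);
 *         g_free(hdev);
 *     }
 */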

/**
 * vhost_dev_cleanup() - tear down and cleanup vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications to be delivered directly to the vhost device
 * rather than being routed through QEMU. Notifications should be
 * enabled before the vhost device is started via @vhost_dev_start;
 * see the sketch after @vhost_dev_stop for the overall ordering.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
bool vhost_config_pending(struct vhost_dev *hdev);
void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);

/**
 * vhost_dev_is_started() - report status of vhost device
 * @hdev: common vhost_dev structure
 *
 * Return the started status of the vhost device
 */
static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
{
    return hdev->started;
}

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 * @vrings: true to have vrings enabled in this call
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can start and the device can start processing VirtIO transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 * @vrings: true to have vrings disabled in this call
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
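
/*
 * Illustrative start/stop ordering, assuming the caller wants vrings
 * enabled and disabled as part of start/stop and with error handling
 * elided:
 *
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev, true);
 *     // ... device processes VirtIO transactions ...
 *     vhost_dev_stop(hdev, vdev, true);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */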

/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely-changing
 * or initialisation-time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time, the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified (see
 * the sketch after @vhost_dev_set_config_notifier below). Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to a device-appropriate config structure
 * @config_len: size of the device-appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error while setting @errp
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: length of the data to set
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate if it is a normal
 * transaction or related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change its configuration, a notifier
 * can be set up to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);
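
/*
 * Illustrative sketch of wiring up config-change handling. The device
 * state "s", the callback name and MY_CONFIG_LEN are assumptions, not
 * taken from any in-tree device:
 *
 *     static int my_config_notifier(struct vhost_dev *dev)
 *     {
 *         uint8_t buf[MY_CONFIG_LEN];   // MY_CONFIG_LEN is device-specific
 *
 *         if (vhost_dev_get_config(dev, buf, sizeof(buf), NULL) < 0) {
 *             return -1;
 *         }
 *         // push the refreshed configuration to the front-end here
 *         return 0;
 *     }
 *
 *     static const VhostDevConfigOps my_config_ops = {
 *         .vhost_dev_config_notifier = my_config_notifier,
 *     };
 *
 *     vhost_dev_set_config_notifier(&s->dev, &my_config_ops);
 */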


/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq.
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);
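
/*
 * Typical unmask pattern (sketch only; "notify the guest" stands in
 * for the transport-specific signalling done by the caller):
 *
 *     vhost_virtqueue_mask(hdev, vdev, n, false);
 *     if (vhost_virtqueue_pending(hdev, n)) {
 *         // an event arrived while masked; notify the guest now
 *     }
 */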

/**
 * vhost_get_features() - return a sanitised set of feature bits
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to a table of feature bits terminated by
 *                VHOST_INVALID_FEATURE_BIT
 * @features: original feature set
 *
 * This returns the set of feature bits that is the intersection of
 * what is supported by the vhost backend (hdev->features), the
 * supported feature_bits and the requested feature set.
 */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);

/**
 * vhost_ack_features() - set vhost acked_features
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to a table of feature bits terminated by
 *                VHOST_INVALID_FEATURE_BIT
 * @features: requested feature set
 *
 * This sets the internal hdev->acked_features to the intersection of
 * the requested @features and the supported feature_bits.
 */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
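
/*
 * Illustrative sketch of the feature_bits convention. The table below
 * is a made-up example, not a real device's list:
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     // Trim the offered features to what the backend supports ...
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 *     // ... and later record what the driver actually accepted.
 *     vhost_ack_features(hdev, example_feature_bits, acked_features);
 */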
unsigned int vhost_get_max_memslots(void);
unsigned int vhost_get_free_memslots(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

void vhost_toggle_device_iotlb(VirtIODevice *vdev);
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq, unsigned idx);
void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq, unsigned idx);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);
bool vhost_dev_has_iommu(struct vhost_dev *dev);
#endif