/*
* vmnet-common.m - network client wrapper for Apple vmnet.framework
*
* Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
* Copyright(c) 2021 Phillip Tennen <phillip@axleos.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qapi/qapi-types-net.h"
#include "vmnet_int.h"
#include "clients.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "sysemu/runstate.h"
#include <vmnet/vmnet.h>
#include <dispatch/dispatch.h>
static void vmnet_send_completed(NetClientState *nc, ssize_t len);
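
/**
 * Map a vmnet_return_t status code to a human-readable message
 * used by the error and warning reports below.
 */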
const char *vmnet_status_map_str(vmnet_return_t status)
{
switch (status) {
case VMNET_SUCCESS:
return "success";
case VMNET_FAILURE:
return "general failure (possibly not enough privileges)";
case VMNET_MEM_FAILURE:
return "memory allocation failure";
case VMNET_INVALID_ARGUMENT:
return "invalid argument specified";
case VMNET_SETUP_INCOMPLETE:
return "interface setup is not complete";
case VMNET_INVALID_ACCESS:
return "invalid access, permission denied";
case VMNET_PACKET_TOO_BIG:
return "packet size is larger than MTU";
case VMNET_BUFFER_EXHAUSTED:
return "buffers exhausted in kernel";
case VMNET_TOO_MANY_PACKETS:
return "packet count exceeds limit";
#if defined(MAC_OS_VERSION_11_0) && \
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
case VMNET_SHARING_SERVICE_BUSY:
return "conflict, sharing service is in use";
#endif
default:
return "unknown vmnet error";
}
}
/**
* Write packets from QEMU to vmnet interface.
*
 * vmnet.framework supports iov, but writing more than
 * one iov into the vmnet interface fails with
 * 'VMNET_INVALID_ARGUMENT'. Collecting the provided iovs into
 * one and passing it to vmnet works fine, which is why
 * receive_iov() is left unimplemented. Performance is still
 * good with .receive() only.
*/
ssize_t vmnet_receive_common(NetClientState *nc,
const uint8_t *buf,
size_t size)
{
VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
struct vmpktdesc packet;
struct iovec iov;
int pkt_cnt;
vmnet_return_t if_status;
    if (size > s->max_packet_size) {
        warn_report("vmnet: packet is too big, %zu > %" PRIu64,
                    size,
                    s->max_packet_size);
        return -1;
    }
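
    /* Wrap the buffer into a single iovec and packet descriptor, since
     * writing more than one iov per packet fails (see comment above) */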
iov.iov_base = (char *) buf;
iov.iov_len = size;
packet.vm_pkt_iovcnt = 1;
packet.vm_flags = 0;
packet.vm_pkt_size = size;
packet.vm_pkt_iov = &iov;
pkt_cnt = 1;
if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
if (if_status != VMNET_SUCCESS) {
error_report("vmnet: write error: %s\n",
vmnet_status_map_str(if_status));
return -1;
}
if (pkt_cnt) {
return size;
}
return 0;
}
/**
* Read packets from vmnet interface and write them
* to temporary buffers in VmnetState.
*
 * Returns the number of packets read (may be 0) on success,
 * -1 on error.
*/
static int vmnet_read_packets(VmnetState *s)
{
assert(s->packets_send_current_pos == s->packets_send_end_pos);
struct vmpktdesc *packets = s->packets_buf;
vmnet_return_t status;
int i;
/* Read as many packets as present */
s->packets_send_current_pos = 0;
s->packets_send_end_pos = VMNET_PACKETS_LIMIT;
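    /* Reset each descriptor to the full buffer size; vmnet_read()
     * replaces vm_pkt_size with the actual length of each packet */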
for (i = 0; i < s->packets_send_end_pos; ++i) {
packets[i].vm_pkt_size = s->max_packet_size;
packets[i].vm_pkt_iovcnt = 1;
packets[i].vm_flags = 0;
}
status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos);
if (status != VMNET_SUCCESS) {
error_printf("vmnet: read failed: %s\n",
vmnet_status_map_str(status));
s->packets_send_current_pos = 0;
s->packets_send_end_pos = 0;
return -1;
}
return s->packets_send_end_pos;
}
/**
* Write packets from temporary buffers in VmnetState
* to QEMU.
*/
static void vmnet_write_packets_to_qemu(VmnetState *s)
{
while (s->packets_send_current_pos < s->packets_send_end_pos) {
ssize_t size = qemu_send_packet_async(&s->nc,
s->iov_buf[s->packets_send_current_pos].iov_base,
s->packets_buf[s->packets_send_current_pos].vm_pkt_size,
vmnet_send_completed);
if (size == 0) {
            /* QEMU is not ready to consume more packets -
             * stop and wait for the completion callback */
return;
}
++s->packets_send_current_pos;
}
}
/**
* Bottom half callback that transfers packets from vmnet interface
* to QEMU.
*
* The process of transferring packets is three-staged:
* 1. Handle vmnet event;
* 2. Read packets from vmnet interface into temporary buffer;
* 3. Write packets from temporary buffer to QEMU.
*
 * QEMU may suspend this process on the last stage, returning 0 from
 * qemu_send_packet_async(). If this happens, we should wait until
 * QEMU is ready to consume more packets, write the ones left in the
 * temporary buffer, and only then continue reading more packets from
 * the vmnet interface.
*
* Packets to be transferred are stored into packets_buf,
* in the window [packets_send_current_pos..packets_send_end_pos)
* including current_pos, excluding end_pos.
*
 * Thus, if QEMU is not ready, the buffer is not fully drained and
 * packets_send_current_pos < packets_send_end_pos.
*/
static void vmnet_send_bh(void *opaque)
{
NetClientState *nc = (NetClientState *) opaque;
VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
/*
* Do nothing if QEMU is not ready - wait
* for completion callback invocation
*/
if (s->packets_send_current_pos < s->packets_send_end_pos) {
return;
}
/* Read packets from vmnet interface */
if (vmnet_read_packets(s) > 0) {
/* Send them to QEMU */
vmnet_write_packets_to_qemu(s);
}
}
/**
* Completion callback to be invoked by QEMU when it becomes
* ready to consume more packets.
*/
static void vmnet_send_completed(NetClientState *nc, ssize_t len)
{
VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    /* Callback is invoked when a queued packet has been sent */
++s->packets_send_current_pos;
/* Complete sending packets left in VmnetState buffers */
vmnet_write_packets_to_qemu(s);
    /* And read new ones from vmnet once the VmnetState buffer is drained */
    if (s->packets_send_current_pos == s->packets_send_end_pos) {
qemu_bh_schedule(s->send_bh);
}
}
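
/**
 * Pre-allocate one max_packet_size receive buffer per packet
 * descriptor so that vmnet_read() can fill up to VMNET_PACKETS_LIMIT
 * packets at once. Called once the interface is started and
 * max_packet_size is known; the buffers are freed in
 * vmnet_cleanup_common().
 */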
static void vmnet_bufs_init(VmnetState *s)
{
struct vmpktdesc *packets = s->packets_buf;
struct iovec *iov = s->iov_buf;
int i;
for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
iov[i].iov_len = s->max_packet_size;
iov[i].iov_base = g_malloc0(iov[i].iov_len);
packets[i].vm_pkt_iov = iov + i;
}
}
/**
* Called on state change to un-register/re-register handlers
*/
static void vmnet_vm_state_change_cb(void *opaque, bool running, RunState state)
{
VmnetState *s = opaque;
if (running) {
vmnet_interface_set_event_callback(
s->vmnet_if,
VMNET_INTERFACE_PACKETS_AVAILABLE,
s->if_queue,
^(interface_event_t event_id, xpc_object_t event) {
assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE);
/*
* This function is being called from a non qemu thread, so
* we only schedule a BH, and do the rest of the io completion
* handling from vmnet_send_bh() which runs in a qemu context.
*/
qemu_bh_schedule(s->send_bh);
});
} else {
vmnet_interface_set_event_callback(
s->vmnet_if,
VMNET_INTERFACE_PACKETS_AVAILABLE,
NULL,
NULL);
}
}
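
/**
 * Create and start a vmnet interface from the given interface
 * description. vmnet_start_interface() completes asynchronously on
 * if_queue, so we block on a semaphore until the completion handler
 * has reported the final status and the interface parameters
 * (MTU and maximum packet size).
 *
 * Returns 0 on success, -1 on failure with errp set.
 */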
int vmnet_if_create(NetClientState *nc,
xpc_object_t if_desc,
Error **errp)
{
VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0);
__block vmnet_return_t if_status;
s->if_queue = dispatch_queue_create(
"org.qemu.vmnet.if_queue",
DISPATCH_QUEUE_SERIAL
);
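
    /* The guest NIC already has a QEMU-managed MAC address, so do not
     * ask vmnet to allocate one for this interface */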
xpc_dictionary_set_bool(
if_desc,
vmnet_allocate_mac_address_key,
false
);
#ifdef DEBUG
qemu_log("vmnet.start.interface_desc:\n");
xpc_dictionary_apply(if_desc,
^bool(const char *k, xpc_object_t v) {
char *desc = xpc_copy_description(v);
qemu_log(" %s=%s\n", k, desc);
free(desc);
return true;
});
#endif /* DEBUG */
s->vmnet_if = vmnet_start_interface(
if_desc,
s->if_queue,
^(vmnet_return_t status, xpc_object_t interface_param) {
if_status = status;
if (status != VMNET_SUCCESS || !interface_param) {
dispatch_semaphore_signal(if_created_sem);
return;
}
#ifdef DEBUG
qemu_log("vmnet.start.interface_param:\n");
xpc_dictionary_apply(interface_param,
^bool(const char *k, xpc_object_t v) {
char *desc = xpc_copy_description(v);
qemu_log(" %s=%s\n", k, desc);
free(desc);
return true;
});
#endif /* DEBUG */
s->mtu = xpc_dictionary_get_uint64(
interface_param,
vmnet_mtu_key);
s->max_packet_size = xpc_dictionary_get_uint64(
interface_param,
vmnet_max_packet_size_key);
dispatch_semaphore_signal(if_created_sem);
});
if (s->vmnet_if == NULL) {
dispatch_release(s->if_queue);
dispatch_release(if_created_sem);
error_setg(errp,
"unable to create interface with requested params");
return -1;
}
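
    /* Block until the start completion handler above has run on if_queue */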
dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER);
dispatch_release(if_created_sem);
if (if_status != VMNET_SUCCESS) {
dispatch_release(s->if_queue);
error_setg(errp,
"cannot create vmnet interface: %s",
vmnet_status_map_str(if_status));
return -1;
}
s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc);
vmnet_bufs_init(s);
s->packets_send_current_pos = 0;
s->packets_send_end_pos = 0;
vmnet_vm_state_change_cb(s, 1, RUN_STATE_RUNNING);
s->change = qemu_add_vm_change_state_handler(vmnet_vm_state_change_cb, s);
return 0;
}
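
/**
 * Stop the vmnet interface and release its resources: unregister the
 * event callback, wait for vmnet_stop_interface() to complete, then
 * free the dispatch queue, the bottom half and the receive buffers.
 */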
void vmnet_cleanup_common(NetClientState *nc)
{
VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
dispatch_semaphore_t if_stopped_sem;
if (s->vmnet_if == NULL) {
return;
}
vmnet_vm_state_change_cb(s, 0, RUN_STATE_SHUTDOWN);
qemu_del_vm_change_state_handler(s->change);
if_stopped_sem = dispatch_semaphore_create(0);
vmnet_stop_interface(
s->vmnet_if,
s->if_queue,
^(vmnet_return_t status) {
assert(status == VMNET_SUCCESS);
dispatch_semaphore_signal(if_stopped_sem);
});
dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER);
qemu_purge_queued_packets(nc);
qemu_bh_delete(s->send_bh);
dispatch_release(if_stopped_sem);
dispatch_release(s->if_queue);
for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
g_free(s->iov_buf[i].iov_base);
}
}