author     Vincenzo Maffione <v.maffione@gmail.com>   2018-12-06 17:59:05 +0100
committer  Jason Wang <jasowang@redhat.com>           2019-03-05 11:27:40 +0800
commit     cc599ed6d46250b2178eca1a06a4602acd83b706 (patch)
tree       ee79baf42dcbfb4675532c3c674db8f704adb98c /net/netmap.c
parent     6d3aaa5b255ffc55a0561d359159fdaaccf09b31 (diff)
net: netmap: small improvements to netmap_send()
This change improves the handling of incomplete multi-slot packets (e.g. those with the NS_MOREFRAG flag set) by advancing ring->head only past complete packets. The ring->cur pointer is advanced in any case, in order to acknowledge the kernel and move the wake-up point forward (thus avoiding repeated wake-ups). Also, don't be verbose when incomplete packets are found.

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
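For context, a netmap RX ring exposes three pointers: tail marks the limit of the slots the kernel has filled, cur tells the kernel how far the application has looked (and therefore where the next wake-up should happen), and head returns consumed buffers to the kernel. The following is a minimal consumer-loop sketch of that discipline as the commit message describes it; it is not QEMU code, and process_fragment() is a hypothetical per-slot callback used only for illustration.

/* Sketch only: illustrates the head/cur/tail protocol the patch relies on. */
#include <stdbool.h>
#include <stdint.h>
#include <net/netmap_user.h>

static void consume_rx_ring(struct netmap_ring *ring,
                            void (*process_fragment)(const char *buf,
                                                     unsigned int len))
{
    unsigned int tail = ring->tail;   /* slots before tail are ready to read */

    while (ring->head != tail) {
        uint32_t i = ring->head;
        bool morefrag;

        /* Walk the slots of one (possibly multi-slot) packet. */
        do {
            struct netmap_slot *slot = &ring->slot[i];

            morefrag = (slot->flags & NS_MOREFRAG) != 0;
            process_fragment(NETMAP_BUF(ring, slot->buf_idx), slot->len);
            i = nm_ring_next(ring, i);
        } while (i != tail && morefrag);

        /* cur: acknowledge the slots we have seen, so the kernel only wakes
         * us up again when new slots arrive beyond this point. */
        ring->cur = i;

        if (morefrag) {
            /* Incomplete packet: leave head untouched so the kernel keeps
             * these buffers, and retry when more slots become available. */
            break;
        }

        /* head: the packet is complete, return its buffers to the kernel. */
        ring->head = i;
    }
}

The patched netmap_send() below applies the same idea, except that the slots of each packet are gathered into an iovec and handed to qemu_sendv_packet_async() before ring->head is advanced.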
Diffstat (limited to 'net/netmap.c')
-rw-r--r--   net/netmap.c   31
1 file changed, 19 insertions, 12 deletions
diff --git a/net/netmap.c b/net/netmap.c
index 2d11a8f..71a8122 100644
--- a/net/netmap.c
+++ b/net/netmap.c
@@ -272,39 +272,46 @@ static void netmap_send(void *opaque)
 {
     NetmapState *s = opaque;
     struct netmap_ring *ring = s->rx;
+    unsigned int tail = ring->tail;
 
-    /* Keep sending while there are available packets into the netmap
+    /* Keep sending while there are available slots in the netmap
        RX ring and the forwarding path towards the peer is open. */
-    while (!nm_ring_empty(ring)) {
-        uint32_t i;
+    while (ring->head != tail) {
+        uint32_t i = ring->head;
         uint32_t idx;
         bool morefrag;
         int iovcnt = 0;
         int iovsize;
 
+        /* Get a (possibly multi-slot) packet. */
         do {
-            i = ring->cur;
             idx = ring->slot[i].buf_idx;
             morefrag = (ring->slot[i].flags & NS_MOREFRAG);
-            s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
+            s->iov[iovcnt].iov_base = (void *)NETMAP_BUF(ring, idx);
             s->iov[iovcnt].iov_len = ring->slot[i].len;
             iovcnt++;
+            i = nm_ring_next(ring, i);
+        } while (i != tail && morefrag);
 
-            ring->cur = ring->head = nm_ring_next(ring, i);
-        } while (!nm_ring_empty(ring) && morefrag);
+        /* Advance ring->cur to tell the kernel that we have seen the slots. */
+        ring->cur = i;
 
-        if (unlikely(nm_ring_empty(ring) && morefrag)) {
-            RD(5, "[netmap_send] ran out of slots, with a pending"
-                  "incomplete packet\n");
+        if (unlikely(morefrag)) {
+            /* This is a truncated packet, so we can stop without releasing the
+             * incomplete slots by updating ring->head. We will hopefully
+             * re-read the complete packet the next time we are called. */
+            break;
         }
 
         iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                           netmap_send_completed);
 
+        /* Release the slots to the kernel. */
+        ring->head = i;
+
         if (iovsize == 0) {
             /* The peer does not receive anymore. Packet is queued, stop
-             * reading from the backend until netmap_send_completed()
-             */
+             * reading from the backend until netmap_send_completed(). */
             netmap_read_poll(s, false);
             break;
         }