author     Vincenzo Maffione <v.maffione@gmail.com>   2018-12-06 17:59:07 +0100
committer  Jason Wang <jasowang@redhat.com>           2019-03-05 11:27:40 +0800
commit     4875bf140662b214ad9f94b0d566354977d8c01d
tree       9d989767f0ba5826e12492234446034febbf1000
parent     c7cbb6b48fc2dbfeeb4c126ee4b909220a418fb0
net: netmap: improve netmap_receive_iov()
Changes:

- Save CPU cycles by computing the return value while scanning the
  input iovec, rather than calling iov_size() at the end.
- Remove the check for s->tx != NULL, because it cannot happen.
- Cache ring->tail in a local variable and use it to check for space
  in the TX ring. The use of nm_ring_empty() was invalid, because
  nobody is updating ring->cur and ring->head at that point.
- In case we run out of netmap slots in the middle of a packet, move
  the wake-up point by advancing ring->cur, but do not expose the
  incomplete packet (i.e., do not also advance ring->head); see the
  ring-pointer sketch below.

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
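The last two points rely on the netmap convention that, on a TX ring, the slots between head and tail belong to userspace: advancing cur tells the kernel how far we have looked (and where we next want to be woken up), while advancing head actually publishes slots for transmission. Below is a minimal, hypothetical sketch of that convention, not QEMU code; it only assumes the standard netmap user headers and their nm_ring_space()/nm_ring_next() helpers, and tx_publish_example() is an illustrative name.

    /* Hypothetical illustration of netmap TX ring pointer semantics;
     * tx_publish_example() is not part of this patch. */
    #include <net/netmap.h>
    #include <net/netmap_user.h>

    static void tx_publish_example(struct netmap_ring *ring, unsigned int nslots)
    {
        unsigned int tail = ring->tail;   /* snapshot of the kernel-owned boundary */
        unsigned int i = ring->head;

        if (nm_ring_space(ring) < nslots) {
            /* Not enough room: move only cur up to tail, so the kernel
             * knows we have seen all available slots and will wake us up
             * when more are free; head stays put, nothing is published. */
            ring->cur = tail;
            return;
        }

        while (nslots-- > 0) {
            /* ... fill ring->slot[i] with data here ... */
            i = nm_ring_next(ring, i);
        }

        /* Publish the filled slots and advance the wakeup point. */
        ring->head = ring->cur = i;
    }

The patch applies this pattern twice: once when there are fewer free slots than iovec fragments, and once when the ring fills up in the middle of a packet.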
-rw-r--r--  net/netmap.c | 29
1 file changed, 17 insertions, 12 deletions
diff --git a/net/netmap.c b/net/netmap.c
index 852106a..0cc8f54 100644
--- a/net/netmap.c
+++ b/net/netmap.c
@@ -159,21 +159,22 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
 {
     NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
     struct netmap_ring *ring = s->tx;
+    unsigned int tail = ring->tail;
+    ssize_t totlen = 0;
     uint32_t last;
     uint32_t idx;
     uint8_t *dst;
     int j;
     uint32_t i;
 
-    if (unlikely(!ring)) {
-        /* Drop the packet. */
-        return iov_size(iov, iovcnt);
-    }
-
-    last = i = ring->cur;
+    last = i = ring->head;
 
     if (nm_ring_space(ring) < iovcnt) {
-        /* Not enough netmap slots. */
+        /* Not enough netmap slots. Tell the kernel that we have seen the new
+         * available slots (so that it notifies us again when it has more
+         * ones), but without publishing any new slots to be processed
+         * (e.g., we don't advance ring->head). */
+        ring->cur = tail;
         netmap_write_poll(s, true);
         return 0;
     }
@@ -183,14 +184,17 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
         int offset = 0;
         int nm_frag_size;
 
+        totlen += iov_frag_size;
+
         /* Split each iovec fragment over more netmap slots, if
            necessary. */
         while (iov_frag_size) {
             nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);
 
-            if (unlikely(nm_ring_empty(ring))) {
-                /* We run out of netmap slots while splitting the
+            if (unlikely(i == tail)) {
+                /* We ran out of netmap slots while splitting the
                    iovec fragments. */
+                ring->cur = tail;
                 netmap_write_poll(s, true);
                 return 0;
             }
@@ -212,12 +216,13 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
     /* The last slot must not have NS_MOREFRAG set. */
     ring->slot[last].flags &= ~NS_MOREFRAG;
 
-    /* Now update ring->cur and ring->head. */
-    ring->cur = ring->head = i;
+    /* Now update ring->head and ring->cur to publish the new slots and
+     * the new wakeup point. */
+    ring->head = ring->cur = i;
 
     ioctl(s->nmd->fd, NIOCTXSYNC, NULL);
 
-    return iov_size(iov, iovcnt);
+    return totlen;
 }
 
 static ssize_t netmap_receive(NetClientState *nc,
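For context, here is roughly how the TX path of netmap_receive_iov() reads after this patch. It is a condensed sketch assembled from the hunks above, not the verbatim QEMU source: the unchanged slot-filling code in the middle (buffer lookup via NETMAP_BUF(), the data copy, and the per-slot len/NS_MOREFRAG bookkeeping) is abbreviated to a comment.

    static ssize_t netmap_receive_iov(NetClientState *nc,
                                      const struct iovec *iov, int iovcnt)
    {
        NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
        struct netmap_ring *ring = s->tx;
        unsigned int tail = ring->tail;   /* cached once, used for all checks */
        ssize_t totlen = 0;               /* accumulated instead of iov_size() */
        uint32_t last, i;
        int j;

        last = i = ring->head;

        if (nm_ring_space(ring) < iovcnt) {
            /* Fewer free slots than fragments: only move the wakeup
             * point (cur), publish nothing (head is untouched). */
            ring->cur = tail;
            netmap_write_poll(s, true);
            return 0;
        }

        for (j = 0; j < iovcnt; j++) {
            int iov_frag_size = iov[j].iov_len;
            int offset = 0;

            totlen += iov_frag_size;

            while (iov_frag_size) {
                int nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

                if (unlikely(i == tail)) {
                    /* Ran out of slots mid-packet: same recovery as
                     * above, the partial packet is never published. */
                    ring->cur = tail;
                    netmap_write_poll(s, true);
                    return 0;
                }

                /* ... unchanged code: copy nm_frag_size bytes from
                 * iov[j].iov_base + offset into the netmap buffer of
                 * ring->slot[i], set its len and NS_MOREFRAG flag ... */

                last = i;
                i = nm_ring_next(ring, i);
                offset += nm_frag_size;
                iov_frag_size -= nm_frag_size;
            }
        }

        /* The last slot of the packet must not carry NS_MOREFRAG. */
        ring->slot[last].flags &= ~NS_MOREFRAG;

        /* Publish the new slots and the new wakeup point. */
        ring->head = ring->cur = i;

        ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

        return totlen;
    }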