author     Richard Henderson <richard.henderson@linaro.org>    2024-06-04 12:28:57 -0500
committer  Richard Henderson <richard.henderson@linaro.org>    2024-06-04 12:28:57 -0500
commit  6e47f7cfcd78ed8e6f192cb0a4c61f209d0c2aaf (patch)
tree    e5fb8fdab3f8f710d8f5df0dbd717c293bccf7dd /hw
parent  121e47c8bff8013bdce1ae2ae79bd2e16260c512 (diff)
parent  dcab53611191f50cf4feabc1d8794d04afe53407 (diff)
Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging
# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEIV1G9IJGaJ7HfzVi7wSWWzmNYhEFAmZewo4ACgkQ7wSWWzmN
# YhHhxgf/ZaECxru4fP8wi34XdSG/PR+BF+W5M9gZIRGrHg3vIf3/LRTpZTDccbRN
# Qpwtypr9O6/AWG9Os80rn7alsmMDxN8PDDNLa9T3wf5pJUQSyQ87Yy0MiuTNPSKD
# HKYUIfIlbFCM5WUW4huMmg98gKTgnzZMqOoRyMFZitbkR59qCm+Exws4HtXvCH68
# 3k4lgvnFccmzO9iIzaOUIPs+Yf04Kw/FrY0Q/6nypvqbF2W80Md6w02JMQuTLwdF
# Guxeg/n6g0NLvCBbkjiM2VWfTaWJYbwFSwRTAMxM/geqh7qAgGsmD0N5lPlgqRDy
# uAy2GvFyrwzcD0lYqf0/fRK0Go0HPA==
# =J70K
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 04 Jun 2024 02:30:22 AM CDT
# gpg:                using RSA key 215D46F48246689EC77F3562EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 215D 46F4 8246 689E C77F 3562 EF04 965B 398D 6211

* tag 'net-pull-request' of https://github.com/jasowang/qemu:
  ebpf: Added traces back. Changed source set for eBPF to 'system'.
  virtio-net: drop too short packets early
  ebpf: Add a separate target for skeleton
  ebpf: Refactor tun_rss_steering_prog()
  ebpf: Return 0 when configuration fails
  ebpf: Fix RSS error handling
  virtio-net: Do not write hashes to peer buffer
  virtio-net: Always set populate_hash
  virtio-net: Unify the logic to update NIC state for RSS
  virtio-net: Disable RSS on reset
  virtio-net: Shrink header byte swapping buffer
  virtio-net: Copy header only when necessary
  virtio-net: Add only one queue pair when realizing
  virtio-net: Do not propagate ebpf-rss-fds errors
  tap: Shrink zeroed virtio-net header
  tap: Call tap_receive_iov() from tap_receive()
  net: Remove receive_raw()
  net: Move virtio-net header length assertion
  tap: Remove qemu_using_vnet_hdr()
  tap: Remove tap_probe_vnet_hdr_len()

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/net/e1000e.c       1
-rw-r--r--  hw/net/igb.c          1
-rw-r--r--  hw/net/net_tx_pkt.c   4
-rw-r--r--  hw/net/virtio-net.c 282
-rw-r--r--  hw/net/vmxnet3.c      2
5 files changed, 130 insertions(+), 160 deletions(-)
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index edc101e..843892c 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -352,7 +352,6 @@ e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr)
for (i = 0; i < s->conf.peers.queues; i++) {
nc = qemu_get_subqueue(s->nic, i);
qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
- qemu_using_vnet_hdr(nc->peer, true);
}
}
diff --git a/hw/net/igb.c b/hw/net/igb.c
index 1ef6170..b92bba4 100644
--- a/hw/net/igb.c
+++ b/hw/net/igb.c
@@ -349,7 +349,6 @@ igb_init_net_peer(IGBState *s, PCIDevice *pci_dev, uint8_t *macaddr)
for (i = 0; i < s->conf.peers.queues; i++) {
nc = qemu_get_subqueue(s->nic, i);
qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
- qemu_using_vnet_hdr(nc->peer, true);
}
}
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index b7b1de8..1f79b82 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -582,7 +582,7 @@ static void net_tx_pkt_sendv(
{
NetClientState *nc = opaque;
- if (qemu_get_using_vnet_hdr(nc->peer)) {
+ if (qemu_get_vnet_hdr_len(nc->peer)) {
qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
} else {
qemu_sendv_packet(nc, iov, iov_cnt);
@@ -812,7 +812,7 @@ static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
- bool offload = qemu_get_using_vnet_hdr(nc->peer);
+ bool offload = qemu_get_vnet_hdr_len(nc->peer);
return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
}
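
[Note: the following is an illustrative sketch, not part of this diff. It shows the caller-side convention the e1000e, igb, net_tx_pkt and vmxnet3 changes converge on: a nonzero vnet header length on a peer is now the single signal that packets carry a struct virtio_net_hdr, which is why the separate qemu_using_vnet_hdr()/qemu_get_using_vnet_hdr() toggle can be removed. qemu_set_vnet_hdr_len(), qemu_get_vnet_hdr_len() and qemu_has_vnet_hdr() are existing QEMU net-layer helpers; the two wrapper functions below are hypothetical.]

    #include "net/net.h"
    #include "standard-headers/linux/virtio_net.h"

    /* Hypothetical helper: setting a nonzero length now also means
     * "the peer is using the vnet header", so no second call is needed. */
    static void nic_enable_peer_vnet_hdr(NetClientState *nc)
    {
        if (qemu_has_vnet_hdr(nc->peer)) {
            qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
        }
    }

    /* Hypothetical predicate: the same test net_tx_pkt_sendv() and
     * net_tx_pkt_send() now perform instead of querying a flag. */
    static bool nic_peer_uses_vnet_hdr(NetClientState *nc)
    {
        return qemu_get_vnet_hdr_len(nc->peer) != 0;
    }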
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 24e5e7d..9c7e85c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -360,7 +360,8 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
* can't do it, we fallback onto fixing the headers in the core
* virtio-net code.
*/
- n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
+ n->needs_vnet_hdr_swap = n->has_vnet_hdr &&
+ virtio_net_set_vnet_endian(vdev, n->nic->ncs,
queue_pairs, true);
} else if (virtio_net_started(n, vdev->status)) {
/* After using the device, we need to reset the network backend to
@@ -599,40 +600,6 @@ static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
}
}
-static void virtio_net_reset(VirtIODevice *vdev)
-{
- VirtIONet *n = VIRTIO_NET(vdev);
- int i;
-
- /* Reset back to compatibility mode */
- n->promisc = 1;
- n->allmulti = 0;
- n->alluni = 0;
- n->nomulti = 0;
- n->nouni = 0;
- n->nobcast = 0;
- /* multiqueue is disabled by default */
- n->curr_queue_pairs = 1;
- timer_del(n->announce_timer.tm);
- n->announce_timer.round = 0;
- n->status &= ~VIRTIO_NET_S_ANNOUNCE;
-
- /* Flush any MAC and VLAN filter table state */
- n->mac_table.in_use = 0;
- n->mac_table.first_multi = 0;
- n->mac_table.multi_overflow = 0;
- n->mac_table.uni_overflow = 0;
- memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
- memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
- qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
- memset(n->vlans, 0, MAX_VLAN >> 3);
-
- /* Flush any async TX */
- for (i = 0; i < n->max_queue_pairs; i++) {
- flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
- }
-}
-
static void peer_test_vnet_hdr(VirtIONet *n)
{
NetClientState *nc = qemu_get_queue(n->nic);
@@ -675,11 +642,6 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
n->mergeable_rx_bufs = mergeable_rx_bufs;
- /*
- * Note: when extending the vnet header, please make sure to
- * change the vnet header copying logic in virtio_net_flush_tx()
- * as well.
- */
if (version_1) {
n->guest_hdr_len = hash_report ?
sizeof(struct virtio_net_hdr_v1_hash) :
@@ -689,6 +651,7 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
n->guest_hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
sizeof(struct virtio_net_hdr);
+ n->rss_data.populate_hash = false;
}
for (i = 0; i < n->max_queue_pairs; i++) {
@@ -1270,18 +1233,6 @@ static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
}
}
-static void virtio_net_detach_epbf_rss(VirtIONet *n);
-
-static void virtio_net_disable_rss(VirtIONet *n)
-{
- if (n->rss_data.enabled) {
- trace_virtio_net_rss_disable();
- }
- n->rss_data.enabled = false;
-
- virtio_net_detach_epbf_rss(n);
-}
-
static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
{
NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
@@ -1329,24 +1280,56 @@ static void virtio_net_detach_epbf_rss(VirtIONet *n)
virtio_net_attach_ebpf_to_backend(n->nic, -1);
}
-static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp)
+static void virtio_net_commit_rss_config(VirtIONet *n)
+{
+ if (n->rss_data.enabled) {
+ n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
+ if (n->rss_data.populate_hash) {
+ virtio_net_detach_epbf_rss(n);
+ } else if (!virtio_net_attach_epbf_rss(n)) {
+ if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
+ warn_report("Can't load eBPF RSS for vhost");
+ } else {
+ warn_report("Can't load eBPF RSS - fallback to software RSS");
+ n->rss_data.enabled_software_rss = true;
+ }
+ }
+
+ trace_virtio_net_rss_enable(n->rss_data.hash_types,
+ n->rss_data.indirections_len,
+ sizeof(n->rss_data.key));
+ } else {
+ virtio_net_detach_epbf_rss(n);
+ trace_virtio_net_rss_disable();
+ }
+}
+
+static void virtio_net_disable_rss(VirtIONet *n)
+{
+ if (!n->rss_data.enabled) {
+ return;
+ }
+
+ n->rss_data.enabled = false;
+ virtio_net_commit_rss_config(n);
+}
+
+static bool virtio_net_load_ebpf_fds(VirtIONet *n)
{
int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1};
int ret = true;
int i = 0;
- ERRP_GUARD();
-
if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) {
- error_setg(errp,
- "Expected %d file descriptors but got %d",
- EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
+ warn_report("Expected %d file descriptors but got %d",
+ EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
return false;
}
for (i = 0; i < n->nr_ebpf_rss_fds; i++) {
- fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp);
- if (*errp) {
+ fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i],
+ &error_warn);
+ if (fds[i] < 0) {
ret = false;
goto exit;
}
@@ -1355,7 +1338,7 @@ static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp)
ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3]);
exit:
- if (!ret || *errp) {
+ if (!ret) {
for (i = 0; i < n->nr_ebpf_rss_fds && fds[i] != -1; i++) {
close(fds[i]);
}
@@ -1364,13 +1347,12 @@ exit:
return ret;
}
-static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
+static bool virtio_net_load_ebpf(VirtIONet *n)
{
bool ret = false;
if (virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
- if (!(n->ebpf_rss_fds
- && virtio_net_load_ebpf_fds(n, errp))) {
+ if (!(n->ebpf_rss_fds && virtio_net_load_ebpf_fds(n))) {
ret = ebpf_rss_load(&n->ebpf_rss);
}
}
@@ -1496,28 +1478,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
goto error;
}
n->rss_data.enabled = true;
-
- if (!n->rss_data.populate_hash) {
- if (!virtio_net_attach_epbf_rss(n)) {
- /* EBPF must be loaded for vhost */
- if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
- warn_report("Can't load eBPF RSS for vhost");
- goto error;
- }
- /* fallback to software RSS */
- warn_report("Can't load eBPF RSS - fallback to software RSS");
- n->rss_data.enabled_software_rss = true;
- }
- } else {
- /* use software RSS for hash populating */
- /* and detach eBPF if was loaded before */
- virtio_net_detach_epbf_rss(n);
- n->rss_data.enabled_software_rss = true;
- }
-
- trace_virtio_net_rss_enable(n->rss_data.hash_types,
- n->rss_data.indirections_len,
- temp.b);
+ virtio_net_commit_rss_config(n);
return queue_pairs;
error:
trace_virtio_net_rss_error(err_msg, err_value);
@@ -1869,16 +1830,9 @@ static uint8_t virtio_net_get_hash_type(bool hasip4,
return 0xff;
}
-static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report,
- uint32_t hash)
-{
- struct virtio_net_hdr_v1_hash *hdr = (void *)buf;
- hdr->hash_value = hash;
- hdr->hash_report = report;
-}
-
static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
- size_t size)
+ size_t size,
+ struct virtio_net_hdr_v1_hash *hdr)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
unsigned int index = nc->queue_index, new_index = index;
@@ -1909,7 +1863,8 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
n->rss_data.hash_types);
if (net_hash_type > NetPktRssIpV6UdpEx) {
if (n->rss_data.populate_hash) {
- virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0);
+ hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE;
+ hdr->hash_report = 0;
}
return n->rss_data.redirect ? n->rss_data.default_queue : -1;
}
@@ -1917,7 +1872,8 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
if (n->rss_data.populate_hash) {
- virtio_set_packet_hash(buf, reports[net_hash_type], hash);
+ hdr->hash_value = hash;
+ hdr->hash_report = reports[net_hash_type];
}
if (n->rss_data.redirect) {
@@ -1937,7 +1893,7 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
size_t lens[VIRTQUEUE_MAX_SIZE];
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
- struct virtio_net_hdr_mrg_rxbuf mhdr;
+ struct virtio_net_hdr_v1_hash extra_hdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset, j;
ssize_t err;
@@ -1947,7 +1903,7 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
}
if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) {
- int index = virtio_net_process_rss(nc, buf, size);
+ int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
if (index >= 0) {
NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
return virtio_net_receive_rcu(nc2, buf, size, true);
@@ -2007,15 +1963,17 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
sg, elem->in_num,
- offsetof(typeof(mhdr), num_buffers),
- sizeof(mhdr.num_buffers));
+ offsetof(typeof(extra_hdr), hdr.num_buffers),
+ sizeof(extra_hdr.hdr.num_buffers));
}
receive_header(n, sg, elem->in_num, buf, size);
if (n->rss_data.populate_hash) {
- offset = sizeof(mhdr);
+ offset = offsetof(typeof(extra_hdr), hash_value);
iov_from_buf(sg, elem->in_num, offset,
- buf + offset, n->host_hdr_len - sizeof(mhdr));
+ (char *)&extra_hdr + offset,
+ sizeof(extra_hdr.hash_value) +
+ sizeof(extra_hdr.hash_report));
}
offset = n->host_hdr_len;
total += n->guest_hdr_len;
@@ -2045,10 +2003,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
}
if (mhdr_cnt) {
- virtio_stw_p(vdev, &mhdr.num_buffers, i);
+ virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
- &mhdr.num_buffers, sizeof mhdr.num_buffers);
+ &extra_hdr.hdr.num_buffers,
+ sizeof extra_hdr.hdr.num_buffers);
}
for (j = 0; j < i; j++) {
@@ -2738,7 +2697,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
ssize_t ret;
unsigned int out_num;
struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
- struct virtio_net_hdr_v1_hash vhdr;
+ struct virtio_net_hdr vhdr;
elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
if (!elem) {
@@ -2749,32 +2708,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
out_sg = elem->out_sg;
if (out_num < 1) {
virtio_error(vdev, "virtio-net header not in first element");
- virtqueue_detach_element(q->tx_vq, elem, 0);
- g_free(elem);
- return -EINVAL;
+ goto detach;
}
- if (n->has_vnet_hdr) {
- if (iov_to_buf(out_sg, out_num, 0, &vhdr, n->guest_hdr_len) <
- n->guest_hdr_len) {
+ if (n->needs_vnet_hdr_swap) {
+ if (iov_to_buf(out_sg, out_num, 0, &vhdr, sizeof(vhdr)) <
+ sizeof(vhdr)) {
virtio_error(vdev, "virtio-net header incorrect");
- virtqueue_detach_element(q->tx_vq, elem, 0);
- g_free(elem);
- return -EINVAL;
+ goto detach;
}
- if (n->needs_vnet_hdr_swap) {
- virtio_net_hdr_swap(vdev, (void *) &vhdr);
- sg2[0].iov_base = &vhdr;
- sg2[0].iov_len = n->guest_hdr_len;
- out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
- out_sg, out_num,
- n->guest_hdr_len, -1);
- if (out_num == VIRTQUEUE_MAX_SIZE) {
- goto drop;
- }
- out_num += 1;
- out_sg = sg2;
+ virtio_net_hdr_swap(vdev, &vhdr);
+ sg2[0].iov_base = &vhdr;
+ sg2[0].iov_len = sizeof(vhdr);
+ out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num,
+ sizeof(vhdr), -1);
+ if (out_num == VIRTQUEUE_MAX_SIZE) {
+ goto drop;
}
+ out_num += 1;
+ out_sg = sg2;
}
/*
* If host wants to see the guest header as is, we can
@@ -2791,6 +2743,11 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
n->guest_hdr_len, -1);
out_num = sg_num;
out_sg = sg;
+
+ if (out_num < 1) {
+ virtio_error(vdev, "virtio-net nothing to send");
+ goto detach;
+ }
}
ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
@@ -2811,6 +2768,11 @@ drop:
}
}
return num_packets;
+
+detach:
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
static void virtio_net_tx_timer(void *opaque);
@@ -3120,26 +3082,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
}
}
- if (n->rss_data.enabled) {
- n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
- if (!n->rss_data.populate_hash) {
- if (!virtio_net_attach_epbf_rss(n)) {
- if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
- warn_report("Can't post-load eBPF RSS for vhost");
- } else {
- warn_report("Can't post-load eBPF RSS - "
- "fallback to software RSS");
- n->rss_data.enabled_software_rss = true;
- }
- }
- }
-
- trace_virtio_net_rss_enable(n->rss_data.hash_types,
- n->rss_data.indirections_len,
- sizeof(n->rss_data.key));
- } else {
- trace_virtio_net_rss_disable();
- }
+ virtio_net_commit_rss_config(n);
return 0;
}
@@ -3746,9 +3689,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
n->net_conf.tx_queue_size);
- for (i = 0; i < n->max_queue_pairs; i++) {
- virtio_net_add_queue(n, i);
- }
+ virtio_net_add_queue(n, 0);
n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
@@ -3778,9 +3719,6 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
peer_test_vnet_hdr(n);
if (peer_has_vnet_hdr(n)) {
- for (i = 0; i < n->max_queue_pairs; i++) {
- qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
- }
n->host_hdr_len = sizeof(struct virtio_net_hdr);
} else {
n->host_hdr_len = 0;
@@ -3812,7 +3750,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
net_rx_pkt_init(&n->rx_pkt);
if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
- virtio_net_load_ebpf(n, errp);
+ virtio_net_load_ebpf(n);
}
}
@@ -3860,6 +3798,42 @@ static void virtio_net_device_unrealize(DeviceState *dev)
virtio_cleanup(vdev);
}
+static void virtio_net_reset(VirtIODevice *vdev)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ int i;
+
+ /* Reset back to compatibility mode */
+ n->promisc = 1;
+ n->allmulti = 0;
+ n->alluni = 0;
+ n->nomulti = 0;
+ n->nouni = 0;
+ n->nobcast = 0;
+ /* multiqueue is disabled by default */
+ n->curr_queue_pairs = 1;
+ timer_del(n->announce_timer.tm);
+ n->announce_timer.round = 0;
+ n->status &= ~VIRTIO_NET_S_ANNOUNCE;
+
+ /* Flush any MAC and VLAN filter table state */
+ n->mac_table.in_use = 0;
+ n->mac_table.first_multi = 0;
+ n->mac_table.multi_overflow = 0;
+ n->mac_table.uni_overflow = 0;
+ memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
+ memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
+ qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
+ memset(n->vlans, 0, MAX_VLAN >> 3);
+
+ /* Flush any async TX */
+ for (i = 0; i < n->max_queue_pairs; i++) {
+ flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
+ }
+
+ virtio_net_disable_rss(n);
+}
+
static void virtio_net_instance_init(Object *obj)
{
VirtIONet *n = VIRTIO_NET(obj);
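
[Note: an illustrative sketch, not part of this diff. Per "virtio-net: Do not write hashes to peer buffer", the receive path now stages hash results in a local struct virtio_net_hdr_v1_hash and copies only the hash fields into the guest buffer instead of writing into the peer's (possibly read-only) packet buffer. The struct layout is taken from the standard virtio-net headers QEMU ships; write_hash_to_guest() is a hypothetical name mirroring the offsetof() arithmetic in virtio_net_receive_rcu().]

    #include <stddef.h>
    #include "qemu/iov.h"
    #include "standard-headers/linux/virtio_net.h"

    /*
     * Layout, for reference (from the standard virtio-net headers):
     *
     *     struct virtio_net_hdr_v1_hash {
     *         struct virtio_net_hdr_v1 hdr;   // ends with num_buffers
     *         uint32_t hash_value;
     *         uint16_t hash_report;
     *         uint16_t padding;
     *     };
     *
     * Because hash_value and hash_report are adjacent, one partial
     * iov_from_buf() starting at offsetof(..., hash_value) updates
     * exactly those bytes of the guest-visible header and nothing else.
     */
    static void write_hash_to_guest(const struct iovec *sg, unsigned in_num,
                                    const struct virtio_net_hdr_v1_hash *extra_hdr)
    {
        size_t off = offsetof(struct virtio_net_hdr_v1_hash, hash_value);

        iov_from_buf(sg, in_num, off, (const char *)extra_hdr + off,
                     sizeof(extra_hdr->hash_value) +
                     sizeof(extra_hdr->hash_report));
    }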
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 707487c..63a9187 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -2091,8 +2091,6 @@ static void vmxnet3_net_init(VMXNET3State *s)
if (s->peer_has_vhdr) {
qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
sizeof(struct virtio_net_hdr));
-
- qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
}
qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
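
[Note: an illustrative sketch, not part of this diff. "virtio-net: Shrink header byte swapping buffer" works because only the basic struct virtio_net_hdr contains endian-sensitive fields; nothing in the larger virtio_net_hdr_v1_hash tail needs swapping on the TX path, so flush_tx can use the smaller scratch buffer. hdr_swap_sketch() below is a hypothetical stand-in for virtio-net's virtio_net_hdr_swap(); virtio_tswap16s() is the existing virtio-access primitive.]

    #include "hw/virtio/virtio-access.h"
    #include "standard-headers/linux/virtio_net.h"

    /* All multi-byte fields of the basic header live here, which is
     * why a struct virtio_net_hdr buffer suffices for the swap. */
    static void hdr_swap_sketch(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
    {
        virtio_tswap16s(vdev, &hdr->hdr_len);
        virtio_tswap16s(vdev, &hdr->gso_size);
        virtio_tswap16s(vdev, &hdr->csum_start);
        virtio_tswap16s(vdev, &hdr->csum_offset);
    }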