author | Richard Henderson <richard.henderson@linaro.org> | 2023-05-23 06:22:12 -0700
committer | Richard Henderson <richard.henderson@linaro.org> | 2023-05-23 06:22:13 -0700
commit | 00f76608a68dcd832c4a1b66694ef3e9cb52912f (patch)
tree | 21157ed0a122ca1bc9c64668b8fe0f020b4292f2 /hw/net
parent | 886c0453cbf10eebd42a9ccf89c3e46eb389c357 (diff)
parent | 792676c165159c11412346870fd58fd243ab2166 (diff)
download | qemu-00f76608a68dcd832c4a1b66694ef3e9cb52912f.zip qemu-00f76608a68dcd832c4a1b66694ef3e9cb52912f.tar.gz qemu-00f76608a68dcd832c4a1b66694ef3e9cb52912f.tar.bz2
Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging
# -----BEGIN PGP SIGNATURE-----
# Version: GnuPG v1
#
# iQEcBAABAgAGBQJkbGmXAAoJEO8Ells5jWIR4ogH/R5+IgkZi1dwN/IxCpzTIc5H
# l5ncKK6TCqKCfgpFnFFLNKhcDqDczq4LhO42s/vnuOF8vIXcUVhLAz0HULARb46o
# p/7Ufn1k8Zg/HGtWwIW+9CcTkymsHzTOwFcTRFiCjpdkjaW1Wprb2q968f0Px8eS
# cKqC5xln8U+s02KWQMHlJili6BTPuw1ZNnYV3iq/81Me96WOtPd8c8ZSF4aVR2AB
# Kqah+BBOnk4p4kg9Gs0OvM4TffEBrsab8iu4s6SSQGA6ymCWY6GeCX0Ik4u9P1yE
# 6NtKLixBPO4fqLwWxWuKVJmaLKmuEd/FjZXWwITx9EPNtDuBuGLDKuvW8fJxkhw=
# =dw2I
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 23 May 2023 12:21:59 AM PDT
# gpg: using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 215D 46F4 8246 689E C77F 3562 EF04 965B 398D 6211
* tag 'net-pull-request' of https://github.com/jasowang/qemu: (50 commits)
rtl8139: fix large_send_mss divide-by-zero
docs/system/devices/igb: Note igb is tested for DPDK
MAINTAINERS: Add a reviewer for network packet abstractions
vmxnet3: Do not depend on PC
igb: Clear-on-read ICR when ICR.INTA is set
igb: Notify only new interrupts
e1000e: Notify only new interrupts
igb: Implement Tx timestamp
igb: Implement Rx PTP2 timestamp
igb: Implement igb-specific oversize check
igb: Filter with the second VLAN tag for extended VLAN
igb: Strip the second VLAN tag for extended VLAN
igb: Implement Tx SCTP CSO
igb: Implement Rx SCTP CSO
igb: Use UDP for RSS hash
igb: Implement MSI-X single vector mode
tests/qtest/libqos/igb: Set GPIE.Multiple_MSIX
hw/net/net_rx_pkt: Enforce alignment for eth_header
net/eth: Always add VLAN tag
net/eth: Use void pointers
...
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'hw/net')
-rw-r--r-- | hw/net/Kconfig | 2
-rw-r--r-- | hw/net/e1000.c | 46
-rw-r--r-- | hw/net/e1000e_core.c | 297
-rw-r--r-- | hw/net/e1000e_core.h | 2
-rw-r--r-- | hw/net/e1000x_common.c | 82
-rw-r--r-- | hw/net/e1000x_common.h | 9
-rw-r--r-- | hw/net/e1000x_regs.h | 24
-rw-r--r-- | hw/net/igb.c | 10
-rw-r--r-- | hw/net/igb_common.h | 24
-rw-r--r-- | hw/net/igb_core.c | 722
-rw-r--r-- | hw/net/igb_regs.h | 67
-rw-r--r-- | hw/net/igbvf.c | 7
-rw-r--r-- | hw/net/net_rx_pkt.c | 107
-rw-r--r-- | hw/net/net_rx_pkt.h | 38
-rw-r--r-- | hw/net/net_tx_pkt.c | 101
-rw-r--r-- | hw/net/net_tx_pkt.h | 46
-rw-r--r-- | hw/net/rtl8139.c | 3
-rw-r--r-- | hw/net/trace-events | 19
-rw-r--r-- | hw/net/virtio-net.c | 7
-rw-r--r-- | hw/net/vmxnet3.c | 22
20 files changed, 890 insertions, 745 deletions
diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 18c7851..98e00be 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -56,7 +56,7 @@ config RTL8139_PCI config VMXNET3_PCI bool - default y if PCI_DEVICES && PC_PCI + default y if PCI_DEVICES depends on PCI config SMC91C111 diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 23d6606..aae5f0b 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -637,9 +637,8 @@ xmit_seg(E1000State *s) e1000x_inc_reg_if_not_full(s->mac_reg, TPT); e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); - s->mac_reg[GPTC] = s->mac_reg[TPT]; - s->mac_reg[GOTCL] = s->mac_reg[TOTL]; - s->mac_reg[GOTCH] = s->mac_reg[TOTH]; + e1000x_inc_reg_if_not_full(s->mac_reg, GPTC); + e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); } static void @@ -805,38 +804,11 @@ start_xmit(E1000State *s) } static int -receive_filter(E1000State *s, const uint8_t *buf, int size) +receive_filter(E1000State *s, const void *buf) { - uint32_t rctl = s->mac_reg[RCTL]; - int isbcast = is_broadcast_ether_addr(buf); - int ismcast = is_multicast_ether_addr(buf); - - if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) && - e1000x_vlan_rx_filter_enabled(s->mac_reg)) { - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci); - uint32_t vfta = - ldl_le_p((uint32_t *)(s->mac_reg + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - return 0; - } - } - - if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */ - return 1; - } - - if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */ - e1000x_inc_reg_if_not_full(s->mac_reg, MPRC); - return 1; - } - - if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */ - e1000x_inc_reg_if_not_full(s->mac_reg, BPRC); - return 1; - } - - return e1000x_rx_group_filter(s->mac_reg, buf); + return (!e1000x_is_vlan_packet(buf, s->mac_reg[VET]) || + e1000x_rx_vlan_filter(s->mac_reg, PKT_GET_VLAN_HDR(buf))) && + e1000x_rx_group_filter(s->mac_reg, buf); } static void @@ -923,6 +895,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size_t desc_offset; size_t desc_size; size_t total_size; + eth_pkt_types_e pkt_type; if (!e1000x_hw_rx_enabled(s->mac_reg)) { return -1; @@ -951,7 +924,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) return size; } - if (!receive_filter(s, filter_buf, size)) { + if (!receive_filter(s, filter_buf)) { return size; } @@ -972,6 +945,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) size -= 4; } + pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)); rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + e1000x_fcs_len(s->mac_reg); @@ -1037,7 +1011,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) } } while (desc_offset < total_size); - e1000x_update_rx_total_stats(s->mac_reg, size, total_size); + e1000x_update_rx_total_stats(s->mac_reg, pkt_type, size, total_size); n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index c0c09b6..9f185d0 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -165,14 +165,14 @@ e1000e_intrmgr_on_throttling_timer(void *opaque) timer->running = false; - if (msi_enabled(timer->core->owner)) { - trace_e1000e_irq_msi_notify_postponed(); - /* Clear msi_causes_pending to fire MSI eventually */ - timer->core->msi_causes_pending = 0; - 
e1000e_set_interrupt_cause(timer->core, 0); - } else { - trace_e1000e_irq_legacy_notify_postponed(); - e1000e_set_interrupt_cause(timer->core, 0); + if (timer->core->mac[IMS] & timer->core->mac[ICR]) { + if (msi_enabled(timer->core->owner)) { + trace_e1000e_irq_msi_notify_postponed(); + msi_notify(timer->core->owner, 0); + } else { + trace_e1000e_irq_legacy_notify_postponed(); + e1000e_raise_legacy_irq(timer->core); + } } } @@ -366,10 +366,6 @@ static void e1000e_intrmgr_fire_all_timers(E1000ECore *core) { int i; - uint32_t val = e1000e_intmgr_collect_delayed_causes(core); - - trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]); - core->mac[ICR] |= val; if (core->itr.running) { timer_del(core->itr.timer); @@ -537,7 +533,7 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt) ip6info->rss_ex_dst_valid, ip6info->rss_ex_src_valid, core->mac[MRQC], - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]), + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6(core->mac[MRQC])); @@ -546,8 +542,8 @@ e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt) ip6info->rss_ex_src_valid))) { if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) { - return E1000_MRQ_RSS_TYPE_IPV6TCP; + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6TCPEX; } if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { @@ -581,7 +577,7 @@ e1000e_rss_calc_hash(E1000ECore *core, case E1000_MRQ_RSS_TYPE_IPV4TCP: type = NetPktRssIpV4Tcp; break; - case E1000_MRQ_RSS_TYPE_IPV6TCP: + case E1000_MRQ_RSS_TYPE_IPV6TCPEX: type = NetPktRssIpV6TcpEx; break; case E1000_MRQ_RSS_TYPE_IPV6: @@ -711,9 +707,8 @@ e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); } static void @@ -747,7 +742,8 @@ e1000e_process_tx_desc(E1000ECore *core, addr = le64_to_cpu(dp->buffer_addr); if (!tx->skip_cp) { - if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) { + if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner, + addr, split_size)) { tx->skip_cp = true; } } @@ -765,7 +761,7 @@ e1000e_process_tx_desc(E1000ECore *core, } tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt, core->owner); + net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); tx->sum_needed = 0; tx->cptse = 0; @@ -959,6 +955,8 @@ e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) { e1000e_set_interrupt_cause(core, cause); } + + net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); } static bool @@ -1034,48 +1032,11 @@ e1000e_rx_l4_cso_enabled(E1000ECore *core) } static bool -e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size) +e1000e_receive_filter(E1000ECore *core, const void *buf) { - uint32_t rctl = core->mac[RCTL]; - - if (e1000x_is_vlan_packet(buf, core->mac[VET]) && - e1000x_vlan_rx_filter_enabled(core->mac)) { - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci); - uint32_t vfta = - ldl_le_p((uint32_t *)(core->mac + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - trace_e1000e_rx_flt_vlan_mismatch(vid); - return false; - } else { - trace_e1000e_rx_flt_vlan_match(vid); - } - } 
- - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_UCAST: - if (rctl & E1000_RCTL_UPE) { - return true; /* promiscuous ucast */ - } - break; - - case ETH_PKT_BCAST: - if (rctl & E1000_RCTL_BAM) { - return true; /* broadcast enabled */ - } - break; - - case ETH_PKT_MCAST: - if (rctl & E1000_RCTL_MPE) { - return true; /* promiscuous mcast */ - } - break; - - default: - g_assert_not_reached(); - } - - return e1000x_rx_group_filter(core->mac, buf); + return (!e1000x_is_vlan_packet(buf, core->mac[VET]) || + e1000x_rx_vlan_filter(core->mac, PKT_GET_VLAN_HDR(buf))) && + e1000x_rx_group_filter(core->mac, buf); } static inline void @@ -1149,6 +1110,11 @@ e1000e_verify_csum_in_sw(E1000ECore *core, return; } + if (l4hdr_proto != ETH_L4_HDR_PROTO_TCP && + l4hdr_proto != ETH_L4_HDR_PROTO_UDP) { + return; + } + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { trace_e1000e_rx_metadata_l4_csum_validation_failed(); return; @@ -1281,9 +1247,8 @@ e1000e_build_rx_metadata(E1000ECore *core, trace_e1000e_rx_metadata_l4_cso_disabled(); } - trace_e1000e_rx_metadata_status_flags(*status_flags); - func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); *status_flags = cpu_to_le32(*status_flags); } @@ -1488,24 +1453,10 @@ e1000e_write_to_rx_buffers(E1000ECore *core, } static void -e1000e_update_rx_stats(E1000ECore *core, - size_t data_size, - size_t data_fcs_size) +e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size) { - e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_BCAST: - e1000x_inc_reg_if_not_full(core->mac, BPRC); - break; - - case ETH_PKT_MCAST: - e1000x_inc_reg_if_not_full(core->mac, MPRC); - break; - - default: - break; - } + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); } static inline bool @@ -1700,12 +1651,9 @@ static ssize_t e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, bool has_vnet) { - static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4); - - uint32_t n = 0; - uint8_t min_buf[ETH_ZLEN]; + uint32_t causes = 0; + uint8_t buf[ETH_ZLEN]; struct iovec min_iov; - uint8_t *filter_buf; size_t size, orig_size; size_t iov_ofs = 0; E1000E_RxRing rxr; @@ -1728,24 +1676,21 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, net_rx_pkt_unset_vhdr(core->rx_pkt); } - filter_buf = iov->iov_base + iov_ofs; orig_size = iov_size(iov, iovcnt); size = orig_size - iov_ofs; /* Pad to minimum Ethernet frame length */ - if (size < sizeof(min_buf)) { - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); - memset(&min_buf[size], 0, sizeof(min_buf) - size); + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, buf, size); + memset(&buf[size], 0, sizeof(buf) - size); e1000x_inc_reg_if_not_full(core->mac, RUC); - min_iov.iov_base = filter_buf = min_buf; - min_iov.iov_len = size = sizeof(min_buf); + min_iov.iov_base = buf; + min_iov.iov_len = size = sizeof(buf); iovcnt = 1; iov = &min_iov; iov_ofs = 0; - } else if (iov->iov_len < maximum_ethernet_hdr_len) { - /* This is very unlikely, but may happen. */ - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); - filter_buf = min_buf; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, buf, ETH_HLEN + 4); } /* Discard oversized packets if !LPE and !SBP. 
*/ @@ -1754,15 +1699,16 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, } net_rx_pkt_set_packet_type(core->rx_pkt, - get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf))); + get_eth_packet_type(PKT_GET_ETH_HDR(buf))); - if (!e1000e_receive_filter(core, filter_buf, size)) { + if (!e1000e_receive_filter(core, buf)) { trace_e1000e_rx_flt_dropped(); return orig_size; } net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - e1000x_vlan_enabled(core->mac), core->mac[VET]); + e1000x_vlan_enabled(core->mac) ? 0 : -1, + core->mac[VET], 0); e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); e1000e_rx_ring_init(core, &rxr, rss_info.queue); @@ -1779,32 +1725,32 @@ e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, /* Perform small receive detection (RSRPD) */ if (total_size < core->mac[RSRPD]) { - n |= E1000_ICS_SRPD; + causes |= E1000_ICS_SRPD; } /* Perform ACK receive detection */ if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) && (e1000e_is_tcp_ack(core, core->rx_pkt))) { - n |= E1000_ICS_ACK; + causes |= E1000_ICS_ACK; } /* Check if receive descriptor minimum threshold hit */ rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); - n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); + causes |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); trace_e1000e_rx_written_to_guest(rxr.i->idx); } else { - n |= E1000_ICS_RXO; + causes |= E1000_ICS_RXO; retval = 0; trace_e1000e_rx_not_written_to_guest(rxr.i->idx); } - if (!e1000e_intrmgr_delay_rx_causes(core, &n)) { - trace_e1000e_rx_interrupt_set(n); - e1000e_set_interrupt_cause(core, n); + if (!e1000e_intrmgr_delay_rx_causes(core, &causes)) { + trace_e1000e_rx_interrupt_set(causes); + e1000e_set_interrupt_cause(core, causes); } else { - trace_e1000e_rx_interrupt_delayed(n); + trace_e1000e_rx_interrupt_delayed(causes); } return retval; @@ -2024,13 +1970,6 @@ void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) } }; -static inline void -e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) -{ - trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); - core->mac[IMS] &= ~bits; -} - static inline bool e1000e_postpone_interrupt(E1000IntrDelayTimer *timer) { @@ -2088,7 +2027,6 @@ e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) effective_eiac = core->mac[EIAC] & cause; core->mac[ICR] &= ~effective_eiac; - core->msi_causes_pending &= ~effective_eiac; if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { core->mac[IMS] &= ~effective_eiac; @@ -2180,33 +2118,17 @@ e1000e_fix_icr_asserted(E1000ECore *core) trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); } -static void -e1000e_send_msi(E1000ECore *core, bool msix) +static void e1000e_raise_interrupts(E1000ECore *core, + size_t index, uint32_t causes) { - uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; - - core->msi_causes_pending &= causes; - causes ^= core->msi_causes_pending; - if (causes == 0) { - return; - } - core->msi_causes_pending |= causes; + bool is_msix = msix_enabled(core->owner); + uint32_t old_causes = core->mac[IMS] & core->mac[ICR]; + uint32_t raised_causes; - if (msix) { - e1000e_msix_notify(core, causes); - } else { - if (!e1000e_itr_should_postpone(core)) { - trace_e1000e_irq_msi_notify(causes); - msi_notify(core->owner, 0); - } - } -} + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); -static void -e1000e_update_interrupt_state(E1000ECore *core) -{ - bool interrupts_pending; - bool 
is_msix = msix_enabled(core->owner); + core->mac[index] |= causes; /* Set ICR[OTHER] for MSI-X */ if (is_msix) { @@ -2228,40 +2150,58 @@ e1000e_update_interrupt_state(E1000ECore *core) */ core->mac[ICS] = core->mac[ICR]; - interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? true : false; - if (!interrupts_pending) { - core->msi_causes_pending = 0; - } - trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], core->mac[ICR], core->mac[IMS]); - if (is_msix || msi_enabled(core->owner)) { - if (interrupts_pending) { - e1000e_send_msi(core, is_msix); - } - } else { - if (interrupts_pending) { - if (!e1000e_itr_should_postpone(core)) { - e1000e_raise_legacy_irq(core); - } + raised_causes = core->mac[IMS] & core->mac[ICR] & ~old_causes; + if (!raised_causes) { + return; + } + + if (is_msix) { + e1000e_msix_notify(core, raised_causes & ~E1000_ICR_ASSERTED); + } else if (!e1000e_itr_should_postpone(core)) { + if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); } else { - e1000e_lower_legacy_irq(core); + e1000e_raise_legacy_irq(core); } } } -static void -e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) +static void e1000e_lower_interrupts(E1000ECore *core, + size_t index, uint32_t causes) { - trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); - val |= e1000e_intmgr_collect_delayed_causes(core); - core->mac[ICR] |= val; + core->mac[index] &= ~causes; + + /* + * Make sure ICR and ICS registers have the same value. + * The spec says that the ICS register is write-only. However in practice, + * on real hardware ICS is readable, and for reads it has the same value as + * ICR (except that ICS does not have the clear on read behaviour of ICR). + * + * The VxWorks PRO/1000 driver uses this behaviour. + */ + core->mac[ICS] = core->mac[ICR]; + + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); - trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); + if (!(core->mac[IMS] & core->mac[ICR]) && + !msix_enabled(core->owner) && !msi_enabled(core->owner)) { + e1000e_lower_legacy_irq(core); + } +} - e1000e_update_interrupt_state(core); +static void +e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) +{ + val |= e1000e_intmgr_collect_delayed_causes(core); + e1000e_raise_interrupts(core, ICR, val); } static inline void @@ -2527,30 +2467,27 @@ e1000e_set_ics(E1000ECore *core, int index, uint32_t val) static void e1000e_set_icr(E1000ECore *core, int index, uint32_t val) { - uint32_t icr = 0; if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - icr = core->mac[ICR] & ~val; /* * Windows driver expects that the "receive overrun" bit and other * ones to be cleared when the "Other" bit (#24) is cleared. */ - icr = (val & E1000_ICR_OTHER) ? 
(icr & ~E1000_ICR_OTHER_CAUSES) : icr; - trace_e1000e_irq_icr_write(val, core->mac[ICR], icr); - core->mac[ICR] = icr; - e1000e_update_interrupt_state(core); + if (val & E1000_ICR_OTHER) { + val |= E1000_ICR_OTHER_CAUSES; + } + e1000e_lower_interrupts(core, ICR, val); } static void e1000e_set_imc(E1000ECore *core, int index, uint32_t val) { trace_e1000e_irq_ims_clear_set_imc(val); - e1000e_clear_ims_bits(core, val); - e1000e_update_interrupt_state(core); + e1000e_lower_interrupts(core, IMS, val); } static void @@ -2571,9 +2508,6 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) uint32_t valid_val = val & ims_valid_mask; - trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); - core->mac[IMS] |= valid_val; - if ((valid_val & ims_ext_mask) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) && msix_enabled(core->owner)) { @@ -2586,7 +2520,7 @@ e1000e_set_ims(E1000ECore *core, int index, uint32_t val) e1000e_intrmgr_fire_all_timers(core); } - e1000e_update_interrupt_state(core); + e1000e_raise_interrupts(core, IMS, valid_val); } static void @@ -2659,28 +2593,25 @@ static uint32_t e1000e_mac_icr_read(E1000ECore *core, int index) { uint32_t ret = core->mac[ICR]; - trace_e1000e_irq_icr_read_entry(ret); if (core->mac[IMS] == 0) { trace_e1000e_irq_icr_clear_zero_ims(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if (!msix_enabled(core->owner)) { trace_e1000e_irq_icr_clear_nonmsix_icr_read(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); } if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_clear_iame(); - core->mac[ICR] = 0; + e1000e_lower_interrupts(core, ICR, 0xffffffff); trace_e1000e_irq_icr_process_iame(); - e1000e_clear_ims_bits(core, core->mac[IAM]); + e1000e_lower_interrupts(core, IMS, core->mac[IAM]); } - trace_e1000e_irq_icr_read_exit(core->mac[ICR]); - e1000e_update_interrupt_state(core); return ret; } @@ -3422,7 +3353,7 @@ e1000e_core_pci_realize(E1000ECore *core, qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS); + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } net_rx_pkt_init(&core->rx_pkt); @@ -3447,7 +3378,6 @@ e1000e_core_pci_uninit(E1000ECore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < E1000E_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -3572,7 +3502,6 @@ static void e1000e_reset(E1000ECore *core, bool sw) e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); for (i = 0; i < ARRAY_SIZE(core->tx); i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner); memset(&core->tx[i].props, 0, sizeof(core->tx[i].props)); core->tx[i].skip_cp = false; } diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h index 213a705..66b025c 100644 --- a/hw/net/e1000e_core.h +++ b/hw/net/e1000e_core.h @@ -111,8 +111,6 @@ struct E1000Core { PCIDevice *owner; void (*owner_start_recv)(PCIDevice *d); - uint32_t msi_causes_pending; - int64_t timadj; }; diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c index b844af5..212873f 100644 --- a/hw/net/e1000x_common.c +++ b/hw/net/e1000x_common.c @@ -58,33 +58,64 @@ bool e1000x_is_vlan_packet(const void *buf, uint16_t vet) return res; } -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf) +bool e1000x_rx_vlan_filter(uint32_t *mac, const struct 
vlan_header *vhdr) +{ + if (e1000x_vlan_rx_filter_enabled(mac)) { + uint16_t vid = lduw_be_p(&vhdr->h_tci); + uint32_t vfta = + ldl_le_p((uint32_t *)(mac + VFTA) + + ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); + if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { + trace_e1000x_rx_flt_vlan_mismatch(vid); + return false; + } + + trace_e1000x_rx_flt_vlan_match(vid); + } + + return true; +} + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr) { static const int mta_shift[] = { 4, 3, 2, 0 }; uint32_t f, ra[2], *rp, rctl = mac[RCTL]; + if (is_broadcast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_BAM) { + return true; + } + } else if (is_multicast_ether_addr(ehdr->h_dest)) { + if (rctl & E1000_RCTL_MPE) { + return true; + } + } else { + if (rctl & E1000_RCTL_UPE) { + return true; + } + } + for (rp = mac + RA; rp < mac + RA + 32; rp += 2) { if (!(rp[1] & E1000_RAH_AV)) { continue; } ra[0] = cpu_to_le32(rp[0]); ra[1] = cpu_to_le32(rp[1]); - if (!memcmp(buf, (uint8_t *)ra, ETH_ALEN)) { + if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) { trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2, - MAC_ARG(buf)); + MAC_ARG(ehdr->h_dest)); return true; } } - trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(buf)); + trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(ehdr->h_dest)); f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; - f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff; + f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff; if (mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) { - e1000x_inc_reg_if_not_full(mac, MPRC); return true; } - trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(buf), + trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(ehdr->h_dest), (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5, mac[MTA + (f >> 5)]); @@ -109,16 +140,16 @@ bool e1000x_hw_rx_enabled(uint32_t *mac) bool e1000x_is_oversized(uint32_t *mac, size_t size) { + size_t header_size = sizeof(struct eth_header) + sizeof(struct vlan_header); /* this is the size past which hardware will drop packets when setting LPE=0 */ - static const int maximum_ethernet_vlan_size = 1522; + size_t maximum_short_size = header_size + ETH_MTU; /* this is the size past which hardware will drop packets when setting LPE=1 */ - static const int maximum_ethernet_lpe_size = 16 * KiB; + size_t maximum_large_size = 16 * KiB - ETH_FCS_LEN; - if ((size > maximum_ethernet_lpe_size || - (size > maximum_ethernet_vlan_size - && !(mac[RCTL] & E1000_RCTL_LPE))) + if ((size > maximum_large_size || + (size > maximum_short_size && !(mac[RCTL] & E1000_RCTL_LPE))) && !(mac[RCTL] & E1000_RCTL_SBP)) { e1000x_inc_reg_if_not_full(mac, ROC); trace_e1000x_rx_oversized(size); @@ -212,23 +243,36 @@ e1000x_rxbufsize(uint32_t rctl) void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size) + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size) { static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511, PRC1023, PRC1522 }; - e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); + e1000x_increase_size_stats(mac, PRCregs, pkt_fcs_size); e1000x_inc_reg_if_not_full(mac, TPR); - mac[GPRC] = mac[TPR]; + e1000x_inc_reg_if_not_full(mac, GPRC); /* TOR - Total Octets Received: * This register includes bytes received in a packet from the <Destination * Address> field through the <CRC> field, inclusively. * Always include FCS length (4) in size. 
*/ - e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); - mac[GORCL] = mac[TORL]; - mac[GORCH] = mac[TORH]; + e1000x_grow_8reg_if_not_full(mac, TORL, pkt_size + 4); + e1000x_grow_8reg_if_not_full(mac, GORCL, pkt_size + 4); + + switch (pkt_type) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(mac, BPRC); + break; + + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(mac, MPRC); + break; + + default: + break; + } } void diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h index 911abd8..be29168 100644 --- a/hw/net/e1000x_common.h +++ b/hw/net/e1000x_common.h @@ -91,8 +91,9 @@ e1000x_update_regs_on_link_up(uint32_t *mac, uint16_t *phy) } void e1000x_update_rx_total_stats(uint32_t *mac, - size_t data_size, - size_t data_fcs_size); + eth_pkt_types_e pkt_type, + size_t pkt_size, + size_t pkt_fcs_size); void e1000x_core_prepare_eeprom(uint16_t *eeprom, const uint16_t *templ, @@ -106,7 +107,9 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac); bool e1000x_is_vlan_packet(const void *buf, uint16_t vet); -bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf); +bool e1000x_rx_vlan_filter(uint32_t *mac, const struct vlan_header *vhdr); + +bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr); bool e1000x_hw_rx_enabled(uint32_t *mac); diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h index 6d3c4c6..13760c6 100644 --- a/hw/net/e1000x_regs.h +++ b/hw/net/e1000x_regs.h @@ -290,18 +290,18 @@ #define E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1)) #define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)]) -#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) -#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) -#define E1000_MRQC_EN_TCPIPV6(mrqc) ((mrqc) & BIT(18)) -#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) -#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) - -#define E1000_MRQ_RSS_TYPE_NONE (0) -#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) -#define E1000_MRQ_RSS_TYPE_IPV4 (2) -#define E1000_MRQ_RSS_TYPE_IPV6TCP (3) -#define E1000_MRQ_RSS_TYPE_IPV6EX (4) -#define E1000_MRQ_RSS_TYPE_IPV6 (5) +#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) +#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) +#define E1000_MRQC_EN_TCPIPV6EX(mrqc) ((mrqc) & BIT(18)) +#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) +#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) + +#define E1000_MRQ_RSS_TYPE_NONE (0) +#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) +#define E1000_MRQ_RSS_TYPE_IPV4 (2) +#define E1000_MRQ_RSS_TYPE_IPV6TCPEX (3) +#define E1000_MRQ_RSS_TYPE_IPV6EX (4) +#define E1000_MRQ_RSS_TYPE_IPV6 (5) #define E1000_ICR_ASSERTED BIT(31) #define E1000_EIAC_MASK 0x01F00000 diff --git a/hw/net/igb.c b/hw/net/igb.c index 51a7e91..1c989d7 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -433,16 +433,16 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp) pcie_ari_init(pci_dev, 0x150, 1); - pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, "igbvf", + pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF, IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS, IGB_VF_OFFSET, IGB_VF_STRIDE); - pcie_sriov_pf_init_vf_bar(pci_dev, 0, + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, - 16 * KiB); - pcie_sriov_pf_init_vf_bar(pci_dev, 3, + IGBVF_MMIO_SIZE); + pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MSIX_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, - 16 * KiB); + IGBVF_MSIX_SIZE); igb_init_net_peer(s, pci_dev, macaddr); diff --git 
a/hw/net/igb_common.h b/hw/net/igb_common.h index 69ac490..5c261ba 100644 --- a/hw/net/igb_common.h +++ b/hw/net/igb_common.h @@ -28,6 +28,14 @@ #include "igb_regs.h" +#define TYPE_IGBVF "igbvf" + +#define IGBVF_MMIO_BAR_IDX (0) +#define IGBVF_MSIX_BAR_IDX (3) + +#define IGBVF_MMIO_SIZE (16 * 1024) +#define IGBVF_MSIX_SIZE (16 * 1024) + #define defreg(x) x = (E1000_##x >> 2) #define defreg_indexed(x, i) x##i = (E1000_##x(i) >> 2) #define defreg_indexeda(x, i) x##i##_A = (E1000_##x##_A(i) >> 2) @@ -43,7 +51,7 @@ defreg_indexeda(x, 0), defreg_indexeda(x, 1), \ defreg_indexeda(x, 2), defreg_indexeda(x, 3) -#define defregv(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ +#define defreg8(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \ defreg_indexed(x, 2), defreg_indexed(x, 3), \ defreg_indexed(x, 4), defreg_indexed(x, 5), \ defreg_indexed(x, 6), defreg_indexed(x, 7) @@ -114,6 +122,8 @@ enum { defreg(EICS), defreg(EIMS), defreg(EIMC), defreg(EIAM), defreg(EICR), defreg(IVAR_MISC), defreg(GPIE), + defreg(TSYNCRXCFG), defreg8(ETQF), + defreg(RXPBS), defregd(RDBAL), defregd(RDBAH), defregd(RDLEN), defregd(SRRCTL), defregd(RDH), defregd(RDT), defregd(RXDCTL), defregd(RXCTL), defregd(RQDPC), defreg(RA2), @@ -125,15 +135,15 @@ enum { defreg(VT_CTL), - defregv(P2VMAILBOX), defregv(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR), + defreg8(P2VMAILBOX), defreg8(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR), defreg(VFLRE), defreg(VFRE), defreg(VFTE), defreg(WVBR), defreg(QDE), defreg(DTXSWC), defreg_indexed(VLVF, 0), - defregv(VMOLR), defreg(RPLOLR), defregv(VMBMEM), defregv(VMVIR), + defreg8(VMOLR), defreg(RPLOLR), defreg8(VMBMEM), defreg8(VMVIR), - defregv(PVTCTRL), defregv(PVTEICS), defregv(PVTEIMS), defregv(PVTEIMC), - defregv(PVTEIAC), defregv(PVTEIAM), defregv(PVTEICR), defregv(PVFGPRC), - defregv(PVFGPTC), defregv(PVFGORC), defregv(PVFGOTC), defregv(PVFMPRC), - defregv(PVFGPRLBC), defregv(PVFGPTLBC), defregv(PVFGORLBC), defregv(PVFGOTLBC), + defreg8(PVTCTRL), defreg8(PVTEICS), defreg8(PVTEIMS), defreg8(PVTEIMC), + defreg8(PVTEIAC), defreg8(PVTEIAM), defreg8(PVTEICR), defreg8(PVFGPRC), + defreg8(PVFGPTC), defreg8(PVFGORC), defreg8(PVFGOTC), defreg8(PVFMPRC), + defreg8(PVFGPRLBC), defreg8(PVFGPTLBC), defreg8(PVFGORLBC), defreg8(PVFGOTLBC), defreg(MTA_A), diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index d733fed..d00b1ca 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -67,14 +67,34 @@ typedef struct IGBTxPktVmdqCallbackContext { NetClientState *nc; } IGBTxPktVmdqCallbackContext; +typedef struct L2Header { + struct eth_header eth; + struct vlan_header vlan[2]; +} L2Header; + +typedef struct PTP2 { + uint8_t message_id_transport_specific; + uint8_t version_ptp; + uint16_t message_length; + uint8_t subdomain_number; + uint8_t reserved0; + uint16_t flags; + uint64_t correction; + uint8_t reserved1[5]; + uint8_t source_communication_technology; + uint32_t source_uuid_lo; + uint16_t source_uuid_hi; + uint16_t source_port_id; + uint16_t sequence_id; + uint8_t control; + uint8_t log_message_period; +} PTP2; + static ssize_t igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, bool has_vnet, bool *external_tx); -static inline void -igb_set_interrupt_cause(IGBCore *core, uint32_t val); - -static void igb_update_interrupt_state(IGBCore *core); +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes); static void igb_reset(IGBCore *core, bool sw); static inline void @@ -92,23 +112,31 @@ igb_lower_legacy_irq(IGBCore *core) pci_set_irq(core->owner, 0); } 
-static void igb_msix_notify(IGBCore *core, unsigned int vector) +static void igb_msix_notify(IGBCore *core, unsigned int cause) { PCIDevice *dev = core->owner; uint16_t vfn; + uint32_t effective_eiac; + unsigned int vector; - vfn = 8 - (vector + 2) / IGBVF_MSIX_VEC_NUM; + vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM; if (vfn < pcie_sriov_num_vfs(core->owner)) { dev = pcie_sriov_get_vf_at_index(core->owner, vfn); assert(dev); - vector = (vector + 2) % IGBVF_MSIX_VEC_NUM; - } else if (vector >= IGB_MSIX_VEC_NUM) { + vector = (cause + 2) % IGBVF_MSIX_VEC_NUM; + } else if (cause >= IGB_MSIX_VEC_NUM) { qemu_log_mask(LOG_GUEST_ERROR, "igb: Tried to use vector unavailable for PF"); return; + } else { + vector = cause; } msix_notify(dev, vector); + + trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); + effective_eiac = core->mac[EIAC] & BIT(cause); + core->mac[EICR] &= ~effective_eiac; } static inline void @@ -274,6 +302,11 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) return E1000_MRQ_RSS_TYPE_IPV4TCP; } + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV4UDP; + } + if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV4; } @@ -296,7 +329,7 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) ip6info->rss_ex_dst_valid, ip6info->rss_ex_src_valid, core->mac[MRQC], - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]), + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6(core->mac[MRQC])); @@ -305,8 +338,13 @@ igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) ip6info->rss_ex_src_valid))) { if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && - E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) { - return E1000_MRQ_RSS_TYPE_IPV6TCP; + E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { + return E1000_MRQ_RSS_TYPE_IPV6TCPEX; + } + + if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP && + (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) { + return E1000_MRQ_RSS_TYPE_IPV6UDP; } if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { @@ -338,7 +376,7 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) case E1000_MRQ_RSS_TYPE_IPV4TCP: type = NetPktRssIpV4Tcp; break; - case E1000_MRQ_RSS_TYPE_IPV6TCP: + case E1000_MRQ_RSS_TYPE_IPV6TCPEX: type = NetPktRssIpV6TcpEx; break; case E1000_MRQ_RSS_TYPE_IPV6: @@ -347,6 +385,12 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) case E1000_MRQ_RSS_TYPE_IPV6EX: type = NetPktRssIpV6Ex; break; + case E1000_MRQ_RSS_TYPE_IPV4UDP: + type = NetPktRssIpV4Udp; + break; + case E1000_MRQ_RSS_TYPE_IPV6UDP: + type = NetPktRssIpV6Udp; + break; default: assert(false); return 0; @@ -402,7 +446,7 @@ igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, } } - if (insert_vlan && e1000x_vlan_enabled(core->mac)) { + if (insert_vlan) { net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, core->mac[VET] & 0xffff); } @@ -411,9 +455,10 @@ igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, static bool igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) { + uint32_t idx = (tx->first_olinfo_status >> 4) & 1; + if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) { - uint32_t idx = (tx->first_olinfo_status >> 4) & 1; - uint32_t mss = tx->ctx[idx].mss_l4len_idx >> 16; + uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT; if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) { return false; } @@ -423,10 +468,11 @@ igb_setup_tx_offloads(IGBCore *core, struct 
igb_tx *tx) return true; } - if (tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) { - if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) { - return false; - } + if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) && + !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ? + net_tx_pkt_update_sctp_checksum(tx->tx_pkt) : + net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) { + return false; } if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) { @@ -538,9 +584,8 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn) g_assert_not_reached(); } - core->mac[GPTC] = core->mac[TPT]; - core->mac[GOTCL] = core->mac[TOTL]; - core->mac[GOTCH] = core->mac[TOTH]; + e1000x_inc_reg_if_not_full(core->mac, GPTC); + e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); if (core->mac[MRQC] & 1) { uint16_t pool = qn % IGB_NUM_VM_POOLS; @@ -598,7 +643,8 @@ igb_process_tx_desc(IGBCore *core, length = cmd_type_len & 0xFFFF; if (!tx->skip_cp) { - if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, buffer_addr, length)) { + if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev, + buffer_addr, length)) { tx->skip_cp = true; } } @@ -607,8 +653,15 @@ igb_process_tx_desc(IGBCore *core, if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { idx = (tx->first_olinfo_status >> 4) & 1; igb_tx_insert_vlan(core, queue_index, tx, - tx->ctx[idx].vlan_macip_lens >> 16, - !!(cmd_type_len & E1000_TXD_CMD_VLE)); + tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT, + !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE)); + + if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) && + (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) && + !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) { + core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID; + e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH); + } if (igb_tx_pkt_send(core, tx, queue_index)) { igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index); @@ -617,7 +670,7 @@ igb_process_tx_desc(IGBCore *core, tx->first = true; tx->skip_cp = false; - net_tx_pkt_reset(tx->tx_pkt, dev); + net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev); } } @@ -843,8 +896,6 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) d = core->owner; } - net_tx_pkt_reset(txr->tx->tx_pkt, d); - while (!igb_ring_empty(core, txi)) { base = igb_ring_head_descr(core, txi); @@ -859,9 +910,11 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) } if (eic) { - core->mac[EICR] |= eic; - igb_set_interrupt_cause(core, E1000_ICR_TXDW); + igb_raise_interrupts(core, EICR, eic); + igb_raise_interrupts(core, ICR, E1000_ICR_TXDW); } + + net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d); } static uint32_t @@ -949,49 +1002,83 @@ igb_rx_l4_cso_enabled(IGBCore *core) return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); } -static bool -igb_rx_is_oversized(IGBCore *core, uint16_t qn, size_t size) +static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr, + size_t size, size_t vlan_num, + bool lpe, uint16_t rlpml) { - uint16_t pool = qn % IGB_NUM_VM_POOLS; - bool lpe = !!(core->mac[VMOLR0 + pool] & E1000_VMOLR_LPE); - int max_ethernet_lpe_size = - core->mac[VMOLR0 + pool] & E1000_VMOLR_RLPML_MASK; - int max_ethernet_vlan_size = 1522; - - return size > (lpe ? max_ethernet_lpe_size : max_ethernet_vlan_size); + size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num; + size_t header_size = sizeof(struct eth_header) + vlan_header_size; + return lpe ? 
size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU; } -static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, - size_t size, E1000E_RSSInfo *rss_info, - bool *external_tx) +static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov, + size_t iovcnt, size_t iov_ofs, + const L2Header *l2_header, size_t size, + E1000E_RSSInfo *rss_info, + uint16_t *etqf, bool *ts, bool *external_tx) { static const int ta_shift[] = { 4, 3, 2, 0 }; + const struct eth_header *ehdr = &l2_header->eth; uint32_t f, ra[2], *macp, rctl = core->mac[RCTL]; uint16_t queues = 0; uint16_t oversized = 0; - uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(ehdr)->h_tci) & VLAN_VID_MASK; - bool accepted = false; + size_t vlan_num = 0; + PTP2 ptp2; + bool lpe; + uint16_t rlpml; int i; memset(rss_info, 0, sizeof(E1000E_RSSInfo)); + *ts = false; if (external_tx) { *external_tx = true; } - if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff) && - e1000x_vlan_rx_filter_enabled(core->mac)) { - uint32_t vfta = - ldl_le_p((uint32_t *)(core->mac + VFTA) + - ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK)); - if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) { - trace_e1000e_rx_flt_vlan_mismatch(vid); - return queues; - } else { - trace_e1000e_rx_flt_vlan_match(vid); + if (core->mac[CTRL_EXT] & BIT(26)) { + if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 && + be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 2; + } + } else { + if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) { + vlan_num = 1; } } + lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE); + rlpml = core->mac[RLPML]; + if (!(core->mac[RCTL] & E1000_RCTL_SBP) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) { + trace_e1000x_rx_oversized(size); + return queues; + } + + for (*etqf = 0; *etqf < 8; (*etqf)++) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) && + be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) { + if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) && + (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) && + !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) && + iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) && + (ptp2.version_ptp & 15) == 2 && + ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) { + e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH); + *ts = true; + core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID; + core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo); + core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) | + (le16_to_cpu(ptp2.sequence_id) << 16); + } + break; + } + } + + if (vlan_num && + !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) { + return queues; + } + if (core->mac[MRQC] & 1) { if (is_broadcast_ether_addr(ehdr->h_dest)) { for (i = 0; i < IGB_NUM_VM_POOLS; i++) { @@ -1042,7 +1129,9 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, if (e1000x_vlan_rx_filter_enabled(core->mac)) { uint16_t mask = 0; - if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff)) { + if (vlan_num) { + uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK; + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid && (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) { @@ -1070,7 +1159,11 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, queues &= 
core->mac[VFRE]; if (queues) { for (i = 0; i < IGB_NUM_VM_POOLS; i++) { - if ((queues & BIT(i)) && igb_rx_is_oversized(core, i, size)) { + lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE); + rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK; + if ((queues & BIT(i)) && + igb_rx_is_oversized(core, ehdr, size, vlan_num, + lpe, rlpml)) { oversized |= BIT(i); } } @@ -1097,33 +1190,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr, } } } else { - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_UCAST: - if (rctl & E1000_RCTL_UPE) { - accepted = true; /* promiscuous ucast */ - } - break; - - case ETH_PKT_BCAST: - if (rctl & E1000_RCTL_BAM) { - accepted = true; /* broadcast enabled */ - } - break; - - case ETH_PKT_MCAST: - if (rctl & E1000_RCTL_MPE) { - accepted = true; /* promiscuous mcast */ - } - break; - - default: - g_assert_not_reached(); - } - - if (!accepted) { - accepted = e1000x_rx_group_filter(core->mac, ehdr->h_dest); - } - + bool accepted = e1000x_rx_group_filter(core->mac, ehdr); if (!accepted) { for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { if (!(macp[1] & E1000_RAH_AV)) { @@ -1217,7 +1284,7 @@ static void igb_build_rx_metadata(IGBCore *core, struct NetRxPkt *pkt, bool is_eop, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t *pkt_info, uint16_t *hdr_info, uint32_t *rss, uint32_t *status_flags, @@ -1225,9 +1292,8 @@ igb_build_rx_metadata(IGBCore *core, uint16_t *vlan_tag) { struct virtio_net_hdr *vhdr; - bool hasip4, hasip6; + bool hasip4, hasip6, csum_valid; EthL4HdrProto l4hdr_proto; - uint32_t pkt_type; *status_flags = E1000_RXD_STAT_DD; @@ -1266,34 +1332,47 @@ igb_build_rx_metadata(IGBCore *core, trace_e1000e_rx_metadata_ack(); } - if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { - trace_e1000e_rx_metadata_ipv6_filtering_disabled(); - pkt_type = E1000_RXD_PKT_MAC; - } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || - l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { - pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; - } else if (hasip4 || hasip6) { - pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; - } else { - pkt_type = E1000_RXD_PKT_MAC; - } + if (pkt_info) { + *pkt_info = rss_info->enabled ? 
rss_info->type : 0; - trace_e1000e_rx_metadata_pkt_type(pkt_type); + if (etqf < 8) { + *pkt_info |= (BIT(11) | etqf) << 4; + } else { + if (hasip4) { + *pkt_info |= E1000_ADVRXD_PKT_IP4; + } - if (pkt_info) { - if (rss_info->enabled) { - *pkt_info = rss_info->type; - } + if (hasip6) { + *pkt_info |= E1000_ADVRXD_PKT_IP6; + } - *pkt_info |= (pkt_type << 4); - } else { - *status_flags |= E1000_RXD_PKT_TYPE(pkt_type); + switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_TCP: + *pkt_info |= E1000_ADVRXD_PKT_TCP; + break; + + case ETH_L4_HDR_PROTO_UDP: + *pkt_info |= E1000_ADVRXD_PKT_UDP; + break; + + case ETH_L4_HDR_PROTO_SCTP: + *pkt_info |= E1000_ADVRXD_PKT_SCTP; + break; + + default: + break; + } + } } if (hdr_info) { *hdr_info = 0; } + if (ts) { + *status_flags |= BIT(16); + } + /* RX CSO information */ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { trace_e1000e_rx_metadata_ipv6_sum_disabled(); @@ -1317,6 +1396,15 @@ igb_build_rx_metadata(IGBCore *core, if (igb_rx_l4_cso_enabled(core)) { switch (l4hdr_proto) { + case ETH_L4_HDR_PROTO_SCTP: + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l4_csum_validation_failed(); + goto func_exit; + } + if (!csum_valid) { + *status_flags |= E1000_RXDEXT_STATERR_TCPE; + } + /* fall through */ case ETH_L4_HDR_PROTO_TCP: *status_flags |= E1000_RXD_STAT_TCPCS; break; @@ -1326,22 +1414,21 @@ igb_build_rx_metadata(IGBCore *core, break; default: - goto func_exit; + break; } } else { trace_e1000e_rx_metadata_l4_cso_disabled(); } - trace_e1000e_rx_metadata_status_flags(*status_flags); - func_exit: + trace_e1000e_rx_metadata_status_flags(*status_flags); *status_flags = cpu_to_le32(*status_flags); } static inline void igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, struct NetRxPkt *pkt, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t length) { uint32_t status_flags, rss; @@ -1352,7 +1439,7 @@ igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, desc->csum = 0; igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, + rss_info, etqf, ts, NULL, NULL, &rss, &status_flags, &ip_id, &desc->special); @@ -1363,7 +1450,7 @@ igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, static inline void igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, struct NetRxPkt *pkt, - const E1000E_RSSInfo *rss_info, + const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts, uint16_t length) { memset(&desc->wb, 0, sizeof(desc->wb)); @@ -1371,7 +1458,7 @@ igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, desc->wb.upper.length = cpu_to_le16(length); igb_build_rx_metadata(core, pkt, pkt != NULL, - rss_info, + rss_info, etqf, ts, &desc->wb.lower.lo_dword.pkt_info, &desc->wb.lower.lo_dword.hdr_info, &desc->wb.lower.hi_dword.rss, @@ -1382,12 +1469,15 @@ igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, static inline void igb_write_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc, -struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t length) + struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts, uint16_t length) { if (igb_rx_use_legacy_descriptor(core)) { - igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, length); + igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, + etqf, ts, length); } else { - igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, length); + igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, + etqf, ts, 
length); } } @@ -1438,29 +1528,17 @@ igb_write_to_rx_buffers(IGBCore *core, static void igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi, - size_t data_size, size_t data_fcs_size) + size_t pkt_size, size_t pkt_fcs_size) { - e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); - - switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { - case ETH_PKT_BCAST: - e1000x_inc_reg_if_not_full(core->mac, BPRC); - break; - - case ETH_PKT_MCAST: - e1000x_inc_reg_if_not_full(core->mac, MPRC); - break; - - default: - break; - } + eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); + e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); if (core->mac[MRQC] & 1) { uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS; - core->mac[PVFGORC0 + (pool * 64)] += data_size + 4; + core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4; core->mac[PVFGPRC0 + (pool * 64)]++; - if (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) { + if (pkt_type == ETH_PKT_MCAST) { core->mac[PVFMPRC0 + (pool * 64)]++; } } @@ -1476,7 +1554,8 @@ igb_rx_descr_threshold_hit(IGBCore *core, const E1000E_RingInfo *rxi) static void igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, const E1000E_RxRing *rxr, - const E1000E_RSSInfo *rss_info) + const E1000E_RSSInfo *rss_info, + uint16_t etqf, bool ts) { PCIDevice *d; dma_addr_t base; @@ -1558,7 +1637,7 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, } igb_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, - rss_info, written); + rss_info, etqf, ts, written); igb_pci_dma_write_rx_desc(core, d, base, &desc, core->rx_desc_len); igb_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN); @@ -1602,19 +1681,22 @@ static ssize_t igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, bool has_vnet, bool *external_tx) { - static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4); - uint16_t queues = 0; - uint32_t n = 0; - uint8_t min_buf[ETH_ZLEN]; + uint32_t causes = 0; + uint32_t ecauses = 0; + union { + L2Header l2_header; + uint8_t octets[ETH_ZLEN]; + } buf; struct iovec min_iov; - struct eth_header *ehdr; - uint8_t *filter_buf; size_t size, orig_size; size_t iov_ofs = 0; E1000E_RxRing rxr; E1000E_RSSInfo rss_info; + uint16_t etqf; + bool ts; size_t total_size; + int strip_vlan_index; int i; trace_e1000e_rx_receive_iov(iovcnt); @@ -1635,36 +1717,30 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, net_rx_pkt_unset_vhdr(core->rx_pkt); } - filter_buf = iov->iov_base + iov_ofs; orig_size = iov_size(iov, iovcnt); size = orig_size - iov_ofs; /* Pad to minimum Ethernet frame length */ - if (size < sizeof(min_buf)) { - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); - memset(&min_buf[size], 0, sizeof(min_buf) - size); + if (size < sizeof(buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, size); + memset(&buf.octets[size], 0, sizeof(buf) - size); e1000x_inc_reg_if_not_full(core->mac, RUC); - min_iov.iov_base = filter_buf = min_buf; - min_iov.iov_len = size = sizeof(min_buf); + min_iov.iov_base = &buf; + min_iov.iov_len = size = sizeof(buf); iovcnt = 1; iov = &min_iov; iov_ofs = 0; - } else if (iov->iov_len < maximum_ethernet_hdr_len) { - /* This is very unlikely, but may happen. */ - iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); - filter_buf = min_buf; - } - - /* Discard oversized packets if !LPE and !SBP. 
*/ - if (e1000x_is_oversized(core->mac, size)) { - return orig_size; + } else { + iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header)); } - ehdr = PKT_GET_ETH_HDR(filter_buf); - net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr)); - net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size); + net_rx_pkt_set_packet_type(core->rx_pkt, + get_eth_packet_type(&buf.l2_header.eth)); + net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs); - queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx); + queues = igb_receive_assign(core, iov, iovcnt, iov_ofs, + &buf.l2_header, size, + &rss_info, &etqf, &ts, external_tx); if (!queues) { trace_e1000e_rx_flt_dropped(); return orig_size; @@ -1678,36 +1754,46 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, igb_rx_ring_init(core, &rxr, i); + if (!igb_rx_strip_vlan(core, rxr.i)) { + strip_vlan_index = -1; + } else if (core->mac[CTRL_EXT] & BIT(26)) { + strip_vlan_index = 1; + } else { + strip_vlan_index = 0; + } + net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, - igb_rx_strip_vlan(core, rxr.i), - core->mac[VET] & 0xffff); + strip_vlan_index, + core->mac[VET] & 0xffff, + core->mac[VET] >> 16); total_size = net_rx_pkt_get_total_len(core->rx_pkt) + e1000x_fcs_len(core->mac); if (!igb_has_rxbufs(core, rxr.i, total_size)) { - n |= E1000_ICS_RXO; + causes |= E1000_ICS_RXO; trace_e1000e_rx_not_written_to_guest(rxr.i->idx); continue; } - n |= E1000_ICR_RXDW; + causes |= E1000_ICR_RXDW; igb_rx_fix_l4_csum(core, core->rx_pkt); - igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); + igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts); /* Check if receive descriptor minimum threshold hit */ if (igb_rx_descr_threshold_hit(core, rxr.i)) { - n |= E1000_ICS_RXDMT0; + causes |= E1000_ICS_RXDMT0; } - core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx); + ecauses |= igb_rx_wb_eic(core, rxr.i->idx); trace_e1000e_rx_written_to_guest(rxr.i->idx); } - trace_e1000e_rx_interrupt_set(n); - igb_set_interrupt_cause(core, n); + trace_e1000e_rx_interrupt_set(causes); + igb_raise_interrupts(core, EICR, ecauses); + igb_raise_interrupts(core, ICR, causes); return orig_size; } @@ -1767,7 +1853,7 @@ void igb_core_set_link_status(IGBCore *core) } if (core->mac[STATUS] != old_status) { - igb_set_interrupt_cause(core, E1000_ICR_LSC); + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); } } @@ -1847,13 +1933,6 @@ igb_set_rx_control(IGBCore *core, int index, uint32_t val) } } -static inline void -igb_clear_ims_bits(IGBCore *core, uint32_t bits) -{ - trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); - core->mac[IMS] &= ~bits; -} - static inline bool igb_postpone_interrupt(IGBIntrDelayTimer *timer) { @@ -1876,10 +1955,8 @@ igb_eitr_should_postpone(IGBCore *core, int idx) return igb_postpone_interrupt(&core->eitr[idx]); } -static void igb_send_msix(IGBCore *core) +static void igb_send_msix(IGBCore *core, uint32_t causes) { - uint32_t causes = core->mac[EICR] & core->mac[EIMS]; - uint32_t effective_eiac; int vector; for (vector = 0; vector < IGB_INTR_NUM; ++vector) { @@ -1887,10 +1964,6 @@ static void igb_send_msix(IGBCore *core) trace_e1000e_irq_msix_notify_vec(vector); igb_msix_notify(core, vector); - - trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); - effective_eiac = core->mac[EIAC] & BIT(vector); - core->mac[EICR] &= ~effective_eiac; } } } @@ -1906,119 +1979,116 @@ igb_fix_icr_asserted(IGBCore *core) 
trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); } -static void -igb_update_interrupt_state(IGBCore *core) +static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes) { - uint32_t icr; - uint32_t causes; + uint32_t old_causes = core->mac[ICR] & core->mac[IMS]; + uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS]; + uint32_t raised_causes; + uint32_t raised_ecauses; uint32_t int_alloc; - icr = core->mac[ICR] & core->mac[IMS]; + trace_e1000e_irq_set(index << 2, + core->mac[index], core->mac[index] | causes); - if (msix_enabled(core->owner)) { - if (icr) { - causes = 0; - if (icr & E1000_ICR_DRSTA) { - int_alloc = core->mac[IVAR_MISC] & 0xff; - if (int_alloc & E1000_IVAR_VALID) { - causes |= BIT(int_alloc & 0x1f); - } + core->mac[index] |= causes; + + if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) { + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + + if (raised_causes & E1000_ICR_DRSTA) { + int_alloc = core->mac[IVAR_MISC] & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); } - /* Check if other bits (excluding the TCP Timer) are enabled. */ - if (icr & ~E1000_ICR_DRSTA) { - int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; - if (int_alloc & E1000_IVAR_VALID) { - causes |= BIT(int_alloc & 0x1f); - } - trace_e1000e_irq_add_msi_other(core->mac[EICR]); + } + /* Check if other bits (excluding the TCP Timer) are enabled. */ + if (raised_causes & ~E1000_ICR_DRSTA) { + int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; + if (int_alloc & E1000_IVAR_VALID) { + core->mac[EICR] |= BIT(int_alloc & 0x1f); } - core->mac[EICR] |= causes; } - if ((core->mac[EICR] & core->mac[EIMS])) { - igb_send_msix(core); + raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses; + if (!raised_ecauses) { + return; } + + igb_send_msix(core, raised_ecauses); } else { igb_fix_icr_asserted(core); - if (icr) { - core->mac[EICR] |= (icr & E1000_ICR_DRSTA) | E1000_EICR_OTHER; - } else { - core->mac[EICR] &= ~E1000_EICR_OTHER; + raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; + if (!raised_causes) { + return; } - trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], - core->mac[ICR], core->mac[IMS]); + core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER; - if (msi_enabled(core->owner)) { - if (icr) { - msi_notify(core->owner, 0); - } + if (msix_enabled(core->owner)) { + trace_e1000e_irq_msix_notify_vec(0); + msix_notify(core->owner, 0); + } else if (msi_enabled(core->owner)) { + trace_e1000e_irq_msi_notify(raised_causes); + msi_notify(core->owner, 0); } else { - if (icr) { - igb_raise_legacy_irq(core); - } else { - igb_lower_legacy_irq(core); - } + igb_raise_legacy_irq(core); } } } -static void -igb_set_interrupt_cause(IGBCore *core, uint32_t val) +static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes) { - trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); + trace_e1000e_irq_clear(index << 2, + core->mac[index], core->mac[index] & ~causes); - core->mac[ICR] |= val; + core->mac[index] &= ~causes; - trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); - igb_update_interrupt_state(core); + if (!(core->mac[ICR] & core->mac[IMS]) && + !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) { + core->mac[EICR] &= ~E1000_EICR_OTHER; + + if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) { + igb_lower_legacy_irq(core); + } + } } static void igb_set_eics(IGBCore *core, int 
index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eics(val, msix); - - core->mac[EICS] |= - val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK); - - /* - * TODO: Move to igb_update_interrupt_state if EICS is modified in other - * places. - */ - core->mac[EICR] = core->mac[EICS]; - - igb_update_interrupt_state(core); + igb_raise_interrupts(core, EICR, val & mask); } static void igb_set_eims(IGBCore *core, int index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eims(val, msix); - - core->mac[EIMS] |= - val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK); - - igb_update_interrupt_state(core); + igb_raise_interrupts(core, EIMS, val & mask); } static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn) { uint32_t ent = core->mac[VTIVAR_MISC + vfn]; + uint32_t causes; if ((ent & E1000_IVAR_VALID)) { - core->mac[EICR] |= (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM); - igb_update_interrupt_state(core); + causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM); + igb_raise_interrupts(core, EICR, causes); } } static void mailbox_interrupt_to_pf(IGBCore *core) { - igb_set_interrupt_cause(core, E1000_ICR_VMMB); + igb_raise_interrupts(core, ICR, E1000_ICR_VMMB); } static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val) @@ -2109,13 +2179,12 @@ static void igb_w1c(IGBCore *core, int index, uint32_t val) static void igb_set_eimc(IGBCore *core, int index, uint32_t val) { bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); + uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; - /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */ - core->mac[EIMS] &= - ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK)); + trace_igb_irq_write_eimc(val, msix); - trace_igb_irq_write_eimc(val, core->mac[EIMS], msix); - igb_update_interrupt_state(core); + /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */ + igb_lower_interrupts(core, EIMS, val & mask); } static void igb_set_eiac(IGBCore *core, int index, uint32_t val) @@ -2155,11 +2224,10 @@ static void igb_set_eicr(IGBCore *core, int index, uint32_t val) * TODO: In IOV mode, only bit zero of this vector is available for the PF * function. */ - core->mac[EICR] &= - ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK)); + uint32_t mask = msix ? 
E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK; trace_igb_irq_write_eicr(val, msix); - igb_update_interrupt_state(core); + igb_lower_interrupts(core, EICR, val & mask); } static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val) @@ -2259,7 +2327,7 @@ igb_autoneg_timer(void *opaque) igb_update_flowctl_status(core); /* signal link status change to the guest */ - igb_set_interrupt_cause(core, E1000_ICR_LSC); + igb_raise_interrupts(core, ICR, E1000_ICR_LSC); } } @@ -2332,7 +2400,7 @@ igb_set_mdic(IGBCore *core, int index, uint32_t val) core->mac[MDIC] = val | E1000_MDIC_READY; if (val & E1000_MDIC_INT_EN) { - igb_set_interrupt_cause(core, E1000_ICR_MDAC); + igb_raise_interrupts(core, ICR, E1000_ICR_MDAC); } } @@ -2440,49 +2508,39 @@ static void igb_set_ics(IGBCore *core, int index, uint32_t val) { trace_e1000e_irq_write_ics(val); - igb_set_interrupt_cause(core, val); + igb_raise_interrupts(core, ICR, val); } static void igb_set_imc(IGBCore *core, int index, uint32_t val) { trace_e1000e_irq_ims_clear_set_imc(val); - igb_clear_ims_bits(core, val); - igb_update_interrupt_state(core); + igb_lower_interrupts(core, IMS, val); } static void igb_set_ims(IGBCore *core, int index, uint32_t val) { - uint32_t valid_val = val & 0x77D4FBFD; - - trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); - core->mac[IMS] |= valid_val; - igb_update_interrupt_state(core); + igb_raise_interrupts(core, IMS, val & 0x77D4FBFD); } -static void igb_commit_icr(IGBCore *core) +static void igb_nsicr(IGBCore *core) { /* - * If GPIE.NSICR = 0, then the copy of IAM to IMS will occur only if at + * If GPIE.NSICR = 0, then the clear of IMS will occur only if at * least one bit is set in the IMS and there is a true interrupt as * reflected in ICR.INTA. */ if ((core->mac[GPIE] & E1000_GPIE_NSICR) || (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) { - igb_set_ims(core, IMS, core->mac[IAM]); - } else { - igb_update_interrupt_state(core); + igb_lower_interrupts(core, IMS, core->mac[IAM]); } } static void igb_set_icr(IGBCore *core, int index, uint32_t val) { - uint32_t icr = core->mac[ICR] & ~val; - - trace_igb_irq_icr_write(val, core->mac[ICR], icr); - core->mac[ICR] = icr; - igb_commit_icr(core); + igb_nsicr(core); + igb_lower_interrupts(core, ICR, val); } static uint32_t @@ -2533,21 +2591,21 @@ static uint32_t igb_mac_icr_read(IGBCore *core, int index) { uint32_t ret = core->mac[ICR]; - trace_e1000e_irq_icr_read_entry(ret); if (core->mac[GPIE] & E1000_GPIE_NSICR) { trace_igb_irq_icr_clear_gpie_nsicr(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); } else if (core->mac[IMS] == 0) { trace_e1000e_irq_icr_clear_zero_ims(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); + } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) { + igb_lower_interrupts(core, ICR, 0xffffffff); } else if (!msix_enabled(core->owner)) { trace_e1000e_irq_icr_clear_nonmsix_icr_read(); - core->mac[ICR] = 0; + igb_lower_interrupts(core, ICR, 0xffffffff); } - trace_e1000e_irq_icr_read_exit(core->mac[ICR]); - igb_commit_icr(core); + igb_nsicr(core); return ret; } @@ -3282,6 +3340,8 @@ static const readops igb_macreg_readops[] = { [EIAM] = igb_mac_readreg, [IVAR0 ... IVAR0 + 7] = igb_mac_readreg, igb_getreg(IVAR_MISC), + igb_getreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_readreg, igb_getreg(VT_CTL), [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg, [V2PMAILBOX0 ... 
V2PMAILBOX7] = igb_mac_vfmailbox_read, @@ -3689,6 +3749,8 @@ static const writeops igb_macreg_writeops[] = { [EIMS] = igb_set_eims, [IVAR0 ... IVAR0 + 7] = igb_mac_writereg, igb_putreg(IVAR_MISC), + igb_putreg(TSYNCRXCFG), + [ETQF0 ... ETQF0 + 7] = igb_mac_writereg, igb_putreg(VT_CTL), [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox, [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox, @@ -3955,7 +4017,7 @@ igb_core_pci_realize(IGBCore *core, core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS); + net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); } net_rx_pkt_init(&core->rx_pkt); @@ -3980,7 +4042,6 @@ igb_core_pci_uninit(IGBCore *core) qemu_del_vm_change_state_handler(core->vmstate); for (i = 0; i < IGB_NUM_QUEUES; i++) { - net_tx_pkt_reset(core->tx[i].tx_pkt, NULL); net_tx_pkt_uninit(core->tx[i].tx_pkt); } @@ -4073,54 +4134,54 @@ static const uint32_t igb_mac_reg_init[] = { [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC, [RPLOLR] = E1000_RPLOLR_STRCRC, [RLPML] = 0x2600, - [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL6] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, - [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_TX_WB_RO_EN | - E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL6] = 
E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, + [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_TX_WB_RO_EN | + E1000_DCA_TXCTRL_DESC_RRO_EN, }; static void igb_reset(IGBCore *core, bool sw) @@ -4159,7 +4220,6 @@ static void igb_reset(IGBCore *core, bool sw) for (i = 0; i < ARRAY_SIZE(core->tx); i++) { tx = &core->tx[i]; - net_tx_pkt_reset(tx->tx_pkt, NULL); memset(tx->ctx, 0, sizeof(tx->ctx)); tx->first = true; tx->skip_cp = false; diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index c5c5b3c..82ff195 100644 --- a/hw/net/igb_regs.h +++ b/hw/net/igb_regs.h @@ -42,11 +42,6 @@ union e1000_adv_tx_desc { } wb; }; -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor Extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP/UDP Segmentation Enable */ - #define E1000_ADVTXD_POTS_IXSM 0x00000100 /* Insert TCP/UDP Checksum */ #define E1000_ADVTXD_POTS_TXSM 0x00000200 /* Insert TCP/UDP Checksum */ @@ -151,6 +146,10 @@ union e1000_adv_rx_desc { #define IGB_82576_VF_DEV_ID 0x10CA #define IGB_I350_VF_DEV_ID 0x1520 +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + /* from igb/e1000_82575.h */ #define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 @@ -160,6 +159,29 @@ union e1000_adv_rx_desc { #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of 
SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + /* Additional Transmit Descriptor Control definitions */ #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ @@ -188,6 +210,15 @@ union e1000_adv_rx_desc { #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + #define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ @@ -291,6 +322,9 @@ union e1000_adv_rx_desc { /* E1000_EITR_CNT_IGNR is only for 82576 and newer */ #define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 @@ -362,6 +396,20 @@ union e1000_adv_rx_desc { #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ @@ -637,10 +685,19 @@ union e1000_adv_rx_desc { #define E1000_RSS_QUEUE(reta, hash) (E1000_RETA_VAL(reta, hash) & 0x0F) +#define E1000_MRQ_RSS_TYPE_IPV4UDP 7 +#define E1000_MRQ_RSS_TYPE_IPV6UDP 8 + #define E1000_STATUS_IOV_MODE 0x00040000 #define E1000_STATUS_NUM_VFS_SHIFT 14 +#define E1000_ADVRXD_PKT_IP4 BIT(4) +#define E1000_ADVRXD_PKT_IP6 BIT(6) +#define E1000_ADVRXD_PKT_TCP BIT(8) +#define E1000_ADVRXD_PKT_UDP BIT(9) +#define E1000_ADVRXD_PKT_SCTP BIT(10) + static inline uint8_t igb_ivar_entry_rx(uint8_t i) { return i < 8 ? 
i * 4 : (i - 8) * 4 + 2; diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c index 70beb7a..284ea61 100644 --- a/hw/net/igbvf.c +++ b/hw/net/igbvf.c @@ -50,15 +50,8 @@ #include "trace.h" #include "qapi/error.h" -#define TYPE_IGBVF "igbvf" OBJECT_DECLARE_SIMPLE_TYPE(IgbVfState, IGBVF) -#define IGBVF_MMIO_BAR_IDX (0) -#define IGBVF_MSIX_BAR_IDX (3) - -#define IGBVF_MMIO_SIZE (16 * 1024) -#define IGBVF_MSIX_SIZE (16 * 1024) - struct IgbVfState { PCIDevice parent_obj; diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c index 39cdea0..32e5f3f 100644 --- a/hw/net/net_rx_pkt.c +++ b/hw/net/net_rx_pkt.c @@ -16,6 +16,7 @@ */ #include "qemu/osdep.h" +#include "qemu/crc32c.h" #include "trace.h" #include "net_rx_pkt.h" #include "net/checksum.h" @@ -23,7 +24,10 @@ struct NetRxPkt { struct virtio_net_hdr virt_hdr; - uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)]; + struct { + struct eth_header eth; + struct vlan_header vlan; + } ehdr_buf; struct iovec *vec; uint16_t vec_len_total; uint16_t vec_len; @@ -89,7 +93,7 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, if (pkt->ehdr_buf_len) { net_rx_pkt_iovec_realloc(pkt, iovcnt + 1); - pkt->vec[0].iov_base = pkt->ehdr_buf; + pkt->vec[0].iov_base = &pkt->ehdr_buf; pkt->vec[0].iov_len = pkt->ehdr_buf_len; pkt->tot_len = pllen + pkt->ehdr_buf_len; @@ -103,7 +107,7 @@ net_rx_pkt_pull_data(struct NetRxPkt *pkt, iov, iovcnt, ploff, pkt->tot_len); } - eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->hasip4, &pkt->hasip6, + eth_get_protocols(pkt->vec, pkt->vec_len, 0, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); @@ -120,7 +124,7 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, assert(pkt); if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf, + pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, &pkt->ehdr_buf, &ploff, &tci); } else { pkt->ehdr_buf_len = 0; @@ -133,20 +137,17 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet) + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext) { uint16_t tci = 0; uint16_t ploff = iovoff; assert(pkt); - if (strip_vlan) { - pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet, - pkt->ehdr_buf, - &ploff, &tci); - } else { - pkt->ehdr_buf_len = 0; - } + pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, + strip_vlan_index, vet, vet_ext, + &pkt->ehdr_buf, + &ploff, &tci); pkt->tci = tci; @@ -186,17 +187,13 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt) return pkt->tot_len; } -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len) +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff) { - const struct iovec iov = { - .iov_base = (void *)data, - .iov_len = len - }; - assert(pkt); - eth_get_protocols(&iov, 1, &pkt->hasip4, &pkt->hasip6, + eth_get_protocols(iov, iovcnt, iovoff, &pkt->hasip4, &pkt->hasip6, &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); } @@ -240,11 +237,6 @@ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt) return &pkt->ip4hdr_info; } -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt) -{ - return &pkt->l4hdr_info; -} - static inline void _net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written, void *ptr, 
size_t size) @@ -560,32 +552,73 @@ _net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) return csum; } -bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) +static bool +_net_rx_pkt_validate_sctp_sum(struct NetRxPkt *pkt) { - uint16_t csum; + size_t csum_off; + size_t off = pkt->l4hdr_off; + size_t vec_len = pkt->vec_len; + struct iovec *vec; + uint32_t calculated = 0; + uint32_t original; + bool valid; - trace_net_rx_pkt_l4_csum_validate_entry(); + for (vec = pkt->vec; vec->iov_len < off; vec++) { + off -= vec->iov_len; + vec_len--; + } - if (pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_TCP && - pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_UDP) { - trace_net_rx_pkt_l4_csum_validate_not_xxp(); + csum_off = off + 8; + + if (!iov_to_buf(vec, vec_len, csum_off, &original, sizeof(original))) { return false; } - if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP && - pkt->l4hdr_info.hdr.udp.uh_sum == 0) { - trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + if (!iov_from_buf(vec, vec_len, csum_off, + &calculated, sizeof(calculated))) { return false; } + calculated = crc32c(0xffffffff, + (uint8_t *)vec->iov_base + off, vec->iov_len - off); + calculated = iov_crc32c(calculated ^ 0xffffffff, vec + 1, vec_len - 1); + valid = calculated == le32_to_cpu(original); + iov_from_buf(vec, vec_len, csum_off, &original, sizeof(original)); + + return valid; +} + +bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) +{ + uint32_t csum; + + trace_net_rx_pkt_l4_csum_validate_entry(); + if (pkt->hasip4 && pkt->ip4hdr_info.fragment) { trace_net_rx_pkt_l4_csum_validate_ip4_fragment(); return false; } - csum = _net_rx_pkt_calc_l4_csum(pkt); + switch (pkt->l4hdr_info.proto) { + case ETH_L4_HDR_PROTO_UDP: + if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) { + trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + return false; + } + /* fall through */ + case ETH_L4_HDR_PROTO_TCP: + csum = _net_rx_pkt_calc_l4_csum(pkt); + *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + break; + + case ETH_L4_HDR_PROTO_SCTP: + *csum_valid = _net_rx_pkt_validate_sctp_sum(pkt); + break; - *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + default: + trace_net_rx_pkt_l4_csum_validate_not_xxp(); + return false; + } trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid); diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h index d00b484..55ec67a 100644 --- a/hw/net/net_rx_pkt.h +++ b/hw/net/net_rx_pkt.h @@ -55,12 +55,14 @@ size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt); * parse and set packet analysis results * * @pkt: packet - * @data: pointer to the data buffer to be parsed - * @len: data length + * @iov: received data scatter-gather list + * @iovcnt: number of elements in iov + * @iovoff: data start offset in the iov * */ -void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, - size_t len); +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, + const struct iovec *iov, size_t iovcnt, + size_t iovoff); /** * fetches packet analysis results @@ -117,15 +119,6 @@ eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt); */ eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt); -/** - * fetches L4 header analysis results - * - * Return: pointer to analysis results structure which is stored in internal - * packet area. 
- * - */ -eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt); - typedef enum { NetPktRssIpV4, NetPktRssIpV4Tcp, @@ -230,18 +223,19 @@ void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, /** * attach scatter-gather data to rx packet * -* @pkt: packet -* @iov: received data scatter-gather list -* @iovcnt number of elements in iov -* @iovoff data start offset in the iov -* @strip_vlan: should the module strip vlan from data -* @vet: VLAN tag Ethernet type +* @pkt: packet +* @iov: received data scatter-gather list +* @iovcnt: number of elements in iov +* @iovoff: data start offset in the iov +* @strip_vlan_index: index of Q tag if it is to be stripped. negative otherwise. +* @vet: VLAN tag Ethernet type +* @vet_ext: outer VLAN tag Ethernet type * */ void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, - const struct iovec *iov, int iovcnt, - size_t iovoff, bool strip_vlan, - uint16_t vet); + const struct iovec *iov, int iovcnt, + size_t iovoff, int strip_vlan_index, + uint16_t vet, uint16_t vet_ext); /** * attach data to rx packet diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 8dc8568..2e5f58b 100644 --- a/hw/net/net_tx_pkt.c +++ b/hw/net/net_tx_pkt.c @@ -16,12 +16,13 @@ */ #include "qemu/osdep.h" -#include "net_tx_pkt.h" +#include "qemu/crc32c.h" #include "net/eth.h" #include "net/checksum.h" #include "net/tap.h" #include "net/net.h" #include "hw/pci/pci_device.h" +#include "net_tx_pkt.h" enum { NET_TX_PKT_VHDR_FRAG = 0, @@ -32,8 +33,6 @@ enum { /* TX packet private context */ struct NetTxPkt { - PCIDevice *pci_dev; - struct virtio_net_hdr virt_hdr; struct iovec *raw; @@ -42,7 +41,10 @@ struct NetTxPkt { struct iovec *vec; - uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; + struct { + struct eth_header eth; + struct vlan_header vlan[3]; + } l2_hdr; union { struct ip_header ip; struct ip6_header ip6; @@ -59,13 +61,10 @@ struct NetTxPkt { uint8_t l4proto; }; -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags) +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags) { struct NetTxPkt *p = g_malloc0(sizeof *p); - p->pci_dev = pci_dev; - p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG); p->raw = g_new(struct iovec, max_frags); @@ -137,6 +136,23 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) pkt->virt_hdr.csum_offset, &csum, sizeof(csum)); } +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt) +{ + uint32_t csum = 0; + struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG; + + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + csum = cpu_to_le32(iov_crc32c(0xffffffff, pl_start_frag, pkt->payload_frags)); + if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { + return false; + } + + return true; +} + static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt) { pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len + @@ -370,24 +386,17 @@ bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt, uint16_t vlan, uint16_t vlan_ethtype) { - bool is_new; assert(pkt); - eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, - vlan, vlan_ethtype, &is_new); + eth_setup_vlan_headers(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, + &pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len, + vlan, vlan_ethtype); - /* update l2hdrlen */ - if (is_new) { - pkt->hdr_len += sizeof(struct vlan_header); - 
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len += - sizeof(struct vlan_header); - } + pkt->hdr_len += sizeof(struct vlan_header); } -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len) +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len) { - hwaddr mapped_len = 0; struct iovec *ventry; assert(pkt); @@ -395,23 +404,12 @@ bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, return false; } - if (!len) { - return true; - } - ventry = &pkt->raw[pkt->raw_frags]; - mapped_len = len; - - ventry->iov_base = pci_dma_map(pkt->pci_dev, pa, - &mapped_len, DMA_DIRECTION_TO_DEVICE); + ventry->iov_base = base; + ventry->iov_len = len; + pkt->raw_frags++; - if ((ventry->iov_base != NULL) && (len == mapped_len)) { - ventry->iov_len = mapped_len; - pkt->raw_frags++; - return true; - } else { - return false; - } + return true; } bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt) @@ -445,7 +443,8 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt) #endif } -void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev) +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context) { int i; @@ -465,17 +464,37 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev) assert(pkt->raw); for (i = 0; i < pkt->raw_frags; i++) { assert(pkt->raw[i].iov_base); - pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, - pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0); + callback(context, pkt->raw[i].iov_base, pkt->raw[i].iov_len); } } - pkt->pci_dev = pci_dev; pkt->raw_frags = 0; pkt->hdr_len = 0; pkt->l4proto = 0; } +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len) +{ + pci_dma_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0); +} + +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len) +{ + dma_addr_t mapped_len = len; + void *base = pci_dma_map(pci_dev, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE); + if (!base) { + return false; + } + + if (mapped_len != len || !net_tx_pkt_add_raw_fragment(pkt, base, len)) { + net_tx_pkt_unmap_frag_pci(pci_dev, base, mapped_len); + return false; + } + + return true; +} + static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt, struct iovec *iov, uint32_t iov_len, uint16_t csl) @@ -697,7 +716,7 @@ static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt, } static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, - NetTxPktCallback callback, + NetTxPktSend callback, void *context) { uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; @@ -794,7 +813,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc) } bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, - NetTxPktCallback callback, void *context) + NetTxPktSend callback, void *context) { assert(pkt); diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h index e5ce6f2..0a716e7 100644 --- a/hw/net/net_tx_pkt.h +++ b/hw/net/net_tx_pkt.h @@ -26,17 +26,16 @@ struct NetTxPkt; -typedef void (* NetTxPktCallback)(void *, const struct iovec *, int, const struct iovec *, int); +typedef void (*NetTxPktFreeFrag)(void *, void *, size_t); +typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int); /** * Init function for tx packet functionality * * @pkt: packet pointer - * @pci_dev: PCI device processing this packet * @max_frags: max tx ip fragments */ -void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, - uint32_t max_frags); +void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags); 
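With the PCIDevice argument gone from net_tx_pkt_init(), DMA mapping moves into net_tx_pkt_add_raw_fragment_pci() and unmapping into the per-fragment callback handed to net_tx_pkt_reset(), both implemented in net_tx_pkt.c above. A minimal sketch of the intended call order in a device model follows; mydev_xmit, d, pa and len are illustrative stand-ins, not code from this patch:

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "net/net.h"
#include "net_tx_pkt.h"

/* Illustrative stand-in, not part of the patch: drive one packet through
 * the decoupled tx-packet API. */
static void mydev_xmit(PCIDevice *d, struct NetTxPkt *pkt,
                       NetClientState *nc, dma_addr_t pa, size_t len)
{
    /* Map guest memory and attach it as a raw fragment; a failed or short
     * mapping is unmapped again and reported as false. */
    if (net_tx_pkt_add_raw_fragment_pci(pkt, d, pa, len) &&
        net_tx_pkt_parse(pkt)) {
        net_tx_pkt_send(pkt, nc);
    }

    /* Release every mapped fragment through the per-fragment callback. */
    net_tx_pkt_reset(pkt, net_tx_pkt_unmap_frag_pci, d);
}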
/** * Clean all tx packet resources. @@ -95,12 +94,11 @@ net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan) * populate data fragment into pkt context. * * @pkt: packet - * @pa: physical address of fragment + * @base: pointer to fragment * @len: length of fragment * */ -bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, - size_t len); +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len); /** * Fix ip header fields and calculate IP header and pseudo header checksums. @@ -119,6 +117,14 @@ void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt); void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt); /** + * Calculate the SCTP checksum. + * + * @pkt: packet + * + */ +bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt); + +/** * get length of all populated data. * * @pkt: packet @@ -148,10 +154,30 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt); * reset tx packet private context (needed to be called between packets) * * @pkt: packet - * @dev: PCI device processing the next packet + * @callback: function to free the fragments + * @context: pointer to be passed to the callback + */ +void net_tx_pkt_reset(struct NetTxPkt *pkt, + NetTxPktFreeFrag callback, void *context); + +/** + * Unmap a fragment mapped from a PCI device. + * + * @context: PCI device owning fragment + * @base: pointer to fragment + * @len: length of fragment + */ +void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len); + +/** + * map data fragment from PCI device and populate it into pkt context. * + * @pci_dev: PCI device owning fragment + * @pa: physical address of fragment + * @len: length of fragment */ -void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev); +bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev, + dma_addr_t pa, size_t len); /** * Send packet to qemu. handles sw offloads if vhdr is not supported. @@ -173,7 +199,7 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc); * @ret: operation result */ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload, - NetTxPktCallback callback, void *context); + NetTxPktSend callback, void *context); /** * parse raw packet data and analyze offload requirements. 
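Both SCTP checksum helpers added by this series (_net_rx_pkt_validate_sctp_sum on the rx path and net_tx_pkt_update_sctp_checksum declared above) apply the same convention: zero the 32-bit Checksum field at offset 8 of the SCTP common header, run CRC32c over the entire SCTP packet, and store the result little-endian. A flat-buffer sketch of that convention; sctp_put_crc32c is a hypothetical helper, not part of the patch:

#include "qemu/osdep.h"
#include "qemu/crc32c.h"

/* Hypothetical helper: apply the SCTP checksum convention used above to a
 * linear packet. buf points at the SCTP common header, len covers the
 * whole SCTP packet, and the Checksum field lives at offset 8. */
static void sctp_put_crc32c(uint8_t *buf, size_t len)
{
    uint32_t csum = 0;

    memcpy(buf + 8, &csum, sizeof(csum));             /* zero the field   */
    csum = cpu_to_le32(crc32c(0xffffffff, buf, len)); /* CRC32c of packet */
    memcpy(buf + 8, &csum, sizeof(csum));             /* store it, LE     */
}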
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 5a5aaf8..5f1a4d3 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -2154,6 +2154,9 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) int large_send_mss = (txdw0 >> CP_TC_LGSEN_MSS_SHIFT) & CP_TC_LGSEN_MSS_MASK; + if (large_send_mss == 0) { + goto skip_offload; + } DPRINTF("+++ C+ mode offloaded task TSO IP data %d " "frame data %d specified MSS=%d\n", diff --git a/hw/net/trace-events b/hw/net/trace-events index d35554f..e4a98b2 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -106,6 +106,8 @@ e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: # e1000x_common.c e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" +e1000x_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" +e1000x_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000x_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_ucast_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x" e1000x_rx_flt_inexact_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint32_t mo, uint32_t mta, uint32_t mta_val) "inexact mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] 0x%x" @@ -154,8 +156,6 @@ e1000e_rx_can_recv_rings_full(void) "Cannot receive: all rings are full" e1000e_rx_can_recv(void) "Can receive" e1000e_rx_has_buffers(int ridx, uint32_t free_desc, size_t total_size, uint32_t desc_buf_size) "ring #%d: free descr: %u, packet size %zu, descr buffer size %u" e1000e_rx_null_descriptor(void) "Null RX descriptor!!" 
-e1000e_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" -e1000e_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" e1000e_rx_desc_ps_read(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) "buffers: [0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64"]" e1000e_rx_desc_ps_write(uint16_t a0, uint16_t a1, uint16_t a2, uint16_t a3) "bytes written: [%u, %u, %u, %u]" e1000e_rx_desc_buff_sizes(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) "buffer sizes: [%u, %u, %u, %u]" @@ -179,7 +179,7 @@ e1000e_rx_rss_disabled(void) "RSS is disabled" e1000e_rx_rss_type(uint32_t type) "RSS type is %u" e1000e_rx_rss_ip4(int l4hdr_proto, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: L4 header protocol %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d" e1000e_rx_rss_ip6_rfctl(uint32_t rfctl) "RSS IPv6: rfctl 0x%X" -e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d" +e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6ex_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6ex enabled %d, ipv6ex enabled %d, ipv6 enabled %d" e1000e_rx_metadata_protocols(bool hasip4, bool hasip6, int l4hdr_protocol) "protocols: ip4: %d, ip6: %d, l4hdr: %d" e1000e_rx_metadata_vlan(uint16_t vlan_tag) "VLAN tag is 0x%X" @@ -205,21 +205,16 @@ e1000e_irq_msix_notify_postponed_vec(int idx) "Sending MSI-X postponed by EITR[% e1000e_irq_legacy_notify(bool level) "IRQ line state: %d" e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x" e1000e_irq_postponed_by_xitr(uint32_t reg) "Interrupt postponed by [E]ITR register 0x%x" -e1000e_irq_clear_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Clearing IMS bits 0x%x: 0x%x --> 0x%x" -e1000e_irq_set_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Setting IMS bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_clear(uint32_t offset, uint32_t old, uint32_t new) "Clearing interrupt register 0x%x: 0x%x --> 0x%x" +e1000e_irq_set(uint32_t offset, uint32_t old, uint32_t new) "Setting interrupt register 0x%x: 0x%x --> 0x%x" e1000e_irq_fix_icr_asserted(uint32_t new_val) "ICR_ASSERTED bit fixed: 0x%x" e1000e_irq_add_msi_other(uint32_t new_val) "ICR_OTHER bit added: 0x%x" e1000e_irq_pending_interrupts(uint32_t pending, uint32_t icr, uint32_t ims) "ICR PENDING: 0x%x (ICR: 0x%x, IMS: 0x%x)" -e1000e_irq_set_cause_entry(uint32_t val, uint32_t icr) "Going to set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_set_cause_exit(uint32_t val, uint32_t icr) "Set IRQ cause 0x%x, ICR: 0x%x" -e1000e_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" e1000e_irq_write_ics(uint32_t val) "Adding ICR bits 0x%x" e1000e_irq_icr_process_iame(void) "Clearing IMS bits due to IAME" e1000e_irq_read_ics(uint32_t ics) "Current ICS: 0x%x" e1000e_irq_read_ims(uint32_t ims) "Current IMS: 0x%x" e1000e_irq_icr_clear_nonmsix_icr_read(void) "Clearing ICR on read due to non MSI-X int" -e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. 
Current ICR: 0x%x" -e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. Current ICR: 0x%x" e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS" e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME" e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X" @@ -235,7 +230,6 @@ e1000e_irq_tidv_fpd_not_running(void) "FPD written while TIDV was not running" e1000e_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = %u" e1000e_irq_itr_set(uint32_t val) "ITR = %u" e1000e_irq_fire_all_timers(uint32_t val) "Firing all delay/throttling timers on all interrupts enable (0x%X written to IMS)" -e1000e_irq_adding_delayed_causes(uint32_t val, uint32_t icr) "Merging delayed causes 0x%X to ICR 0x%X" e1000e_irq_msix_pending_clearing(uint32_t cause, uint32_t int_cfg, uint32_t vec) "Clearing MSI-X pending bit for cause 0x%x, IVAR config 0x%x, vector %u" e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration for cause 0x%x: 0x%x" @@ -288,12 +282,11 @@ igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint3 igb_rx_metadata_rss(uint32_t rss) "RSS data: 0x%X" igb_irq_icr_clear_gpie_nsicr(void) "Clearing ICR on read due to GPIE.NSICR enabled" -igb_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" igb_irq_set_iam(uint32_t icr) "Update IAM: 0x%x" igb_irq_read_iam(uint32_t icr) "Current IAM: 0x%x" igb_irq_write_eics(uint32_t val, bool msix) "Update EICS: 0x%x MSI-X: %d" igb_irq_write_eims(uint32_t val, bool msix) "Update EIMS: 0x%x MSI-X: %d" -igb_irq_write_eimc(uint32_t val, uint32_t eims, bool msix) "Update EIMC: 0x%x EIMS: 0x%x MSI-X: %d" +igb_irq_write_eimc(uint32_t val, bool msix) "Update EIMC: 0x%x MSI-X: %d" igb_irq_write_eiac(uint32_t val) "Update EIAC: 0x%x" igb_irq_write_eiam(uint32_t val, bool msix) "Update EIAM: 0x%x MSI-X: %d" igb_irq_write_eicr(uint32_t val, bool msix) "Update EICR: 0x%x MSI-X: %d" diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 8ce239a..6df6b73 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1834,9 +1834,12 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, VIRTIO_NET_HASH_REPORT_UDPv6, VIRTIO_NET_HASH_REPORT_UDPv6_EX }; + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; - net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len, - size - n->host_hdr_len); + net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len); net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto, n->rss_data.hash_types); diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index f7b874c..18b9edf 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -651,9 +651,8 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) data_len = (txd.len > 0) ? 
txd.len : VMXNET3_MAX_TX_BUF_SIZE; data_pa = txd.addr; - if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, - data_pa, - data_len)) { + if (!net_tx_pkt_add_raw_fragment_pci(s->tx_pkt, PCI_DEVICE(s), + data_pa, data_len)) { s->skip_current_tx_pkt = true; } } @@ -678,9 +677,12 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) vmxnet3_complete_packet(s, qidx, txd_idx); s->tx_sop = true; s->skip_current_tx_pkt = false; - net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); + net_tx_pkt_reset(s->tx_pkt, + net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } } + + net_tx_pkt_reset(s->tx_pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s)); } static inline void @@ -1159,7 +1161,6 @@ static void vmxnet3_deactivate_device(VMXNET3State *s) { if (s->device_active) { VMW_CBPRN("Deactivating vmxnet3..."); - net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s)); net_tx_pkt_uninit(s->tx_pkt); net_rx_pkt_uninit(s->rx_pkt); s->device_active = false; @@ -1519,7 +1520,7 @@ static void vmxnet3_activate_device(VMXNET3State *s) /* Preallocate TX packet wrapper */ VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags); - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); net_rx_pkt_init(&s->rx_pkt); /* Read rings memory locations for RX queues */ @@ -2001,7 +2002,12 @@ vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size) get_eth_packet_type(PKT_GET_ETH_HDR(buf))); if (vmxnet3_rx_filter_may_indicate(s, buf, size)) { - net_rx_pkt_set_protocols(s->rx_pkt, buf, size); + struct iovec iov = { + .iov_base = (void *)buf, + .iov_len = size + }; + + net_rx_pkt_set_protocols(s->rx_pkt, &iov, 1, 0); vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size); net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1; @@ -2399,7 +2405,7 @@ static int vmxnet3_post_load(void *opaque, int version_id) { VMXNET3State *s = opaque; - net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags); + net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags); net_rx_pkt_init(&s->rx_pkt); if (s->msix_used) {
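The common thread of the igb interrupt rework earlier in this diff (igb_raise_interrupts()/igb_lower_interrupts() replacing igb_set_interrupt_cause()/igb_update_interrupt_state()) is that a message, whether legacy, MSI or MSI-X, is generated only for causes that newly become both pending and unmasked, and lowering never notifies. A toy model of that rule, independent of QEMU; IntrModel and the function names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Toy model: icr holds pending causes, ims the unmasked ones. */
typedef struct {
    uint32_t icr;
    uint32_t ims;
} IntrModel;

/* Returns true when a new interrupt message should be sent, mirroring
 * raised_causes = ICR & IMS & ~old_causes in igb_raise_interrupts(). */
static bool model_raise(IntrModel *m, uint32_t causes)
{
    uint32_t old = m->icr & m->ims;  /* causes already signalled */

    m->icr |= causes;
    return (m->icr & m->ims & ~old) != 0;
}

/* Clearing pending or mask bits never generates a message. */
static void model_lower(IntrModel *m, uint32_t causes)
{
    m->icr &= ~causes;
}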