@@ -67,6 +67,11 @@ typedef struct IGBTxPktVmdqCallbackContext {
     NetClientState *nc;
 } IGBTxPktVmdqCallbackContext;
 
+typedef struct L2Header {
+    struct eth_header eth;
+    struct vlan_header vlan;
+} L2Header;
+
 static ssize_t
 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                      bool has_vnet, bool *external_tx);
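
Note: eth_header and vlan_header are QEMU's on-wire layouts from net/eth.h, so the
new L2Header gives typed access to the Ethernet header plus the first (outer) VLAN
tag. A sketch of the assumed definitions:

    /* Assumed from net/eth.h; both are packed on-wire layouts, so
     * sizeof(L2Header) spans the 14-byte Ethernet header plus one
     * 4-byte VLAN tag. */
    struct eth_header {
        uint8_t  h_dest[ETH_ALEN];   /* destination MAC */
        uint8_t  h_source[ETH_ALEN]; /* source MAC */
        uint16_t h_proto;            /* EtherType, big-endian */
    };

    struct vlan_header {
        uint16_t h_tci;              /* PCP:3 DEI:1 VID:12, big-endian */
        uint16_t h_proto;            /* encapsulated EtherType */
    };
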
@@ -402,7 +407,7 @@ igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
         }
     }
 
-    if (insert_vlan && e1000x_vlan_enabled(core->mac)) {
+    if (insert_vlan) {
         net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
             core->mac[VET] & 0xffff);
     }
@@ -538,9 +543,8 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn)
         g_assert_not_reached();
     }
 
-    core->mac[GPTC] = core->mac[TPT];
-    core->mac[GOTCL] = core->mac[TOTL];
-    core->mac[GOTCH] = core->mac[TOTH];
+    e1000x_inc_reg_if_not_full(core->mac, GPTC);
+    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
 
     if (core->mac[MRQC] & 1) {
         uint16_t pool = qn % IGB_NUM_VM_POOLS;
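
Note: both helpers are shared with the e1000e model in hw/net/e1000x_common.c. A
minimal sketch of their assumed semantics — GPTC now saturates instead of mirroring
TPT, and GOTCL/GOTCH grow as a single 64-bit octet counter split across two 32-bit
registers:

    /* Sketch, assuming the e1000x_common.c behavior: */
    void e1000x_inc_reg_if_not_full(uint32_t *mac, int index)
    {
        if (mac[index] != UINT32_MAX) {
            mac[index]++;
        }
    }

    void e1000x_grow_8reg_if_not_full(uint32_t *mac, int index, int size)
    {
        uint64_t sum = mac[index] | (uint64_t)mac[index + 1] << 32;

        if (sum + size < sum) {
            sum = ~0ULL;              /* saturate on 64-bit overflow */
        } else {
            sum += size;
        }
        mac[index] = sum;             /* low half, e.g. GOTCL */
        mac[index + 1] = sum >> 32;   /* high half, e.g. GOTCH */
    }
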
@@ -961,15 +965,16 @@ igb_rx_is_oversized(IGBCore *core, uint16_t qn, size_t size)
     return size > (lpe ? max_ethernet_lpe_size : max_ethernet_vlan_size);
 }
 
-static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
+static uint16_t igb_receive_assign(IGBCore *core, const L2Header *l2_header,
                                    size_t size, E1000E_RSSInfo *rss_info,
                                    bool *external_tx)
 {
     static const int ta_shift[] = { 4, 3, 2, 0 };
+    const struct eth_header *ehdr = &l2_header->eth;
     uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
     uint16_t queues = 0;
     uint16_t oversized = 0;
-    uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(ehdr)->h_tci) & VLAN_VID_MASK;
+    uint16_t vid = be16_to_cpu(l2_header->vlan.h_tci) & VLAN_VID_MASK;
     bool accepted = false;
     int i;
 
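Note: h_tci is big-endian on the wire, so it is byte-swapped before masking; this
also drops the PKT_GET_VLAN_HDR pointer arithmetic in favor of the typed vlan
member. A worked example of the decode, assuming VLAN_VID_MASK is 0xfff:

    uint16_t tci = be16_to_cpu(l2_header->vlan.h_tci); /* e.g. 0xa00a */
    uint16_t vid = tci & VLAN_VID_MASK;                /* 0x00a -> VID 10 */
    uint16_t pcp = tci >> 13;                          /* 0x5   -> prio 5 */
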
@@ -1227,7 +1232,6 @@ igb_build_rx_metadata(IGBCore *core,
     struct virtio_net_hdr *vhdr;
     bool hasip4, hasip6;
     EthL4HdrProto l4hdr_proto;
-    uint32_t pkt_type;
 
     *status_flags = E1000_RXD_STAT_DD;
 
@@ -1266,28 +1270,29 @@ igb_build_rx_metadata(IGBCore *core,
         trace_e1000e_rx_metadata_ack();
     }
 
-    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
-        trace_e1000e_rx_metadata_ipv6_filtering_disabled();
-        pkt_type = E1000_RXD_PKT_MAC;
-    } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
-               l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
-        pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
-    } else if (hasip4 || hasip6) {
-        pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
-    } else {
-        pkt_type = E1000_RXD_PKT_MAC;
-    }
+    if (pkt_info) {
+        *pkt_info = rss_info->enabled ? rss_info->type : 0;
 
-    trace_e1000e_rx_metadata_pkt_type(pkt_type);
+        if (hasip4) {
+            *pkt_info |= E1000_ADVRXD_PKT_IP4;
+        }
 
-    if (pkt_info) {
-        if (rss_info->enabled) {
-            *pkt_info = rss_info->type;
+        if (hasip6) {
+            *pkt_info |= E1000_ADVRXD_PKT_IP6;
         }
 
-        *pkt_info |= (pkt_type << 4);
-    } else {
-        *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
+        switch (l4hdr_proto) {
+        case ETH_L4_HDR_PROTO_TCP:
+            *pkt_info |= E1000_ADVRXD_PKT_TCP;
+            break;
+
+        case ETH_L4_HDR_PROTO_UDP:
+            *pkt_info |= E1000_ADVRXD_PKT_UDP;
+            break;
+
+        default:
+            break;
+        }
     }
 
     if (hdr_info) {
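
Note: unlike the legacy descriptor's enumerated packet type, the advanced Rx
descriptor's packet-type field is a bitmask, so the IP4/IP6 and TCP/UDP bits
combine independently with the RSS type. Illustrative only (the real bit values
live in hw/net/igb_regs.h): for an RSS-hashed IPv4 TCP segment the field ends
up as

    *pkt_info = rss_info->type        /* RSS hash type in the low bits */
                | E1000_ADVRXD_PKT_IP4
                | E1000_ADVRXD_PKT_TCP;
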
@@ -1438,29 +1443,17 @@ igb_write_to_rx_buffers(IGBCore *core,
 
 static void
 igb_update_rx_stats(IGBCore *core, const E1000E_RingInfo *rxi,
-                    size_t data_size, size_t data_fcs_size)
+                    size_t pkt_size, size_t pkt_fcs_size)
 {
-    e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size);
-
-    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
-    case ETH_PKT_BCAST:
-        e1000x_inc_reg_if_not_full(core->mac, BPRC);
-        break;
-
-    case ETH_PKT_MCAST:
-        e1000x_inc_reg_if_not_full(core->mac, MPRC);
-        break;
-
-    default:
-        break;
-    }
+    eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
+    e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);
 
     if (core->mac[MRQC] & 1) {
         uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
 
-        core->mac[PVFGORC0 + (pool * 64)] += data_size + 4;
+        core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4;
         core->mac[PVFGPRC0 + (pool * 64)]++;
-        if (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) {
+        if (pkt_type == ETH_PKT_MCAST) {
             core->mac[PVFMPRC0 + (pool * 64)]++;
         }
     }
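
Note: this relies on the companion change to the shared helper, which moves the
BPRC/MPRC accounting behind the new pkt_type parameter so the packet is classified
exactly once. The assumed updated prototype in hw/net/e1000x_common.h:

    void e1000x_update_rx_total_stats(uint32_t *mac,
                                      eth_pkt_types_e pkt_type,
                                      size_t data_size,
                                      size_t data_fcs_size);

Caching pkt_type up front also keeps the PVFMPRC0 multicast check above consistent
with the global counters.
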
@@ -1602,14 +1595,13 @@ static ssize_t
 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                      bool has_vnet, bool *external_tx)
 {
-    static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
-
     uint16_t queues = 0;
     uint32_t n = 0;
-    uint8_t min_buf[ETH_ZLEN];
+    union {
+        L2Header l2_header;
+        uint8_t octets[ETH_ZLEN];
+    } buf;
     struct iovec min_iov;
-    struct eth_header *ehdr;
-    uint8_t *filter_buf;
     size_t size, orig_size;
     size_t iov_ofs = 0;
     E1000E_RxRing rxr;
@@ -1635,24 +1627,21 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
         net_rx_pkt_unset_vhdr(core->rx_pkt);
     }
 
-    filter_buf = iov->iov_base + iov_ofs;
     orig_size = iov_size(iov, iovcnt);
     size = orig_size - iov_ofs;
 
     /* Pad to minimum Ethernet frame length */
-    if (size < sizeof(min_buf)) {
-        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size);
-        memset(&min_buf[size], 0, sizeof(min_buf) - size);
+    if (size < sizeof(buf)) {
+        iov_to_buf(iov, iovcnt, iov_ofs, &buf, size);
+        memset(&buf.octets[size], 0, sizeof(buf) - size);
         e1000x_inc_reg_if_not_full(core->mac, RUC);
-        min_iov.iov_base = filter_buf = min_buf;
-        min_iov.iov_len = size = sizeof(min_buf);
+        min_iov.iov_base = &buf;
+        min_iov.iov_len = size = sizeof(buf);
         iovcnt = 1;
         iov = &min_iov;
         iov_ofs = 0;
-    } else if (iov->iov_len < maximum_ethernet_hdr_len) {
-        /* This is very unlikely, but may happen. */
-        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len);
-        filter_buf = min_buf;
+    } else {
+        iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header));
     }
 
     /* Discard oversized packets if !LPE and !SBP. */
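
Note: the union replaces the removed filter_buf/ehdr scratch pointers: octets[]
is the zero-padding target for runt frames, while l2_header gives aligned, typed
access to the bytes the filtering code inspects. The resulting invariant, as a
comment:

    /* After either branch above, the first sizeof(L2Header) bytes of buf
     * are initialized: the short-frame path copies and zero-pads all of
     * buf, and the normal path now copies sizeof(buf.l2_header) bytes
     * unconditionally, which also subsumes the old special case for an
     * Ethernet header split across iovec elements. ETH_ZLEN (60)
     * comfortably exceeds the 18-byte L2Header. */
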
@@ -1660,11 +1649,12 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
         return orig_size;
     }
 
-    ehdr = PKT_GET_ETH_HDR(filter_buf);
-    net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
-    net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size);
+    net_rx_pkt_set_packet_type(core->rx_pkt,
+                               get_eth_packet_type(&buf.l2_header.eth));
+    net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs);
 
-    queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx);
+    queues = igb_receive_assign(core, &buf.l2_header, size,
+                                &rss_info, external_tx);
     if (!queues) {
         trace_e1000e_rx_flt_dropped();
         return orig_size;
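
Note: passing the iovec down assumes the companion net/net_rx_pkt change that
reworks protocol parsing to read from scattered buffers; the assumed updated
prototype:

    void net_rx_pkt_set_protocols(struct NetRxPkt *pkt,
                                  const struct iovec *iov, size_t iovcnt,
                                  size_t iovoff);

Parsing is then no longer limited to the locally copied L2 bytes and works even
when L3/L4 headers sit past the first iovec element.
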
@@ -2464,16 +2454,16 @@ igb_set_ims(IGBCore *core, int index, uint32_t val)
 static void igb_commit_icr(IGBCore *core)
 {
     /*
-     * If GPIE.NSICR = 0, then the copy of IAM to IMS will occur only if at
+     * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
      * least one bit is set in the IMS and there is a true interrupt as
      * reflected in ICR.INTA.
      */
     if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
         (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
-        igb_set_ims(core, IMS, core->mac[IAM]);
-    } else {
-        igb_update_interrupt_state(core);
+        igb_clear_ims_bits(core, core->mac[IAM]);
     }
+
+    igb_update_interrupt_state(core);
 }
 
 static void igb_set_icr(IGBCore *core, int index, uint32_t val)
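
Note: with this change, committing ICR while auto-mask is armed clears the IAM
bits out of IMS (igb semantics) rather than copying IAM into IMS (the behavior
inherited from e1000e), and the interrupt state is re-evaluated on every commit.
A sketch of the assumed helper, per the call site above:

    /* Sketch; the helper lives elsewhere in igb_core.c. */
    static void igb_clear_ims_bits(IGBCore *core, uint32_t bits)
    {
        core->mac[IMS] &= ~bits;
    }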