|
@@ -74,54 +74,54 @@
|
|
#define VMXNET3_MAX_NMSIX_INTRS (1)
|
|
#define VMXNET3_MAX_NMSIX_INTRS (1)
|
|
|
|
|
|
/* Macros for rings descriptors access */
|
|
/* Macros for rings descriptors access */
|
|
-#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
|
|
|
|
- (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \
|
|
|
|
+ (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
-#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
|
|
|
|
- (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
|
|
|
|
|
|
+#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \
|
|
|
|
+ (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
|
|
|
|
|
|
-#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
|
|
|
|
- (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \
|
|
|
|
+ (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
-#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
|
|
|
|
- (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
|
|
|
|
|
|
+#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \
|
|
|
|
+ (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
|
|
|
|
|
|
-#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
|
|
|
|
- (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \
|
|
|
|
+ (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
|
|
|
|
|
|
-#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
|
|
|
|
- (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
|
|
|
|
|
|
+#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \
|
|
|
|
+ (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
|
|
|
|
|
|
-#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
|
|
|
|
- (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \
|
|
|
|
+ (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
|
|
|
|
|
|
-#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
|
|
|
|
- (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \
|
|
|
|
+ (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
|
|
|
|
|
|
-#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
|
|
|
|
- (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
|
|
|
|
|
|
+#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \
|
|
|
|
+ (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
|
|
|
|
|
|
-#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
|
|
|
|
- (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
|
|
|
|
|
|
+#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \
|
|
|
|
+ (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
|
|
|
|
|
|
/* Macros for guest driver shared area access */
|
|
/* Macros for guest driver shared area access */
|
|
-#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
|
|
|
|
- (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \
|
|
|
|
+ (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
-#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
|
|
|
|
- (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \
|
|
|
|
+ (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
-#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
|
|
|
|
- (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
|
|
|
|
|
|
+#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \
|
|
|
|
+ (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
|
|
|
|
|
|
-#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
|
|
|
|
- (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \
|
|
|
|
+ (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
-#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
|
|
|
|
- (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
+#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \
|
|
|
|
+ (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
|
|
|
|
|
|
-#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
|
|
|
|
- (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
|
|
|
|
|
|
+#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \
|
|
|
|
+ (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
|
|
|
|
|
|
#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
|
|
#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
|
|
|
|
|
|
@@ -147,7 +147,8 @@ typedef struct {
|
|
uint8_t gen;
|
|
uint8_t gen;
|
|
} Vmxnet3Ring;
|
|
} Vmxnet3Ring;
|
|
|
|
|
|
-static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
|
|
|
|
|
|
+static inline void vmxnet3_ring_init(PCIDevice *d,
|
|
|
|
+ Vmxnet3Ring *ring,
|
|
hwaddr pa,
|
|
hwaddr pa,
|
|
size_t size,
|
|
size_t size,
|
|
size_t cell_size,
|
|
size_t cell_size,
|
|
@@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
|
|
ring->next = 0;
|
|
ring->next = 0;
|
|
|
|
|
|
if (zero_region) {
|
|
if (zero_region) {
|
|
- vmw_shmem_set(pa, 0, size * cell_size);
|
|
|
|
|
|
+ vmw_shmem_set(d, pa, 0, size * cell_size);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
@@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
|
|
return ring->pa + ring->next * ring->cell_size;
|
|
return ring->pa + ring->next * ring->cell_size;
|
|
}
|
|
}
|
|
|
|
|
|
-static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
|
|
|
|
|
|
+static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
|
|
|
|
+ void *buff)
|
|
{
|
|
{
|
|
- vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
|
|
|
|
|
|
+ vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
|
|
}
|
|
}
|
|
|
|
|
|
-static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
|
|
|
|
|
|
+static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
|
|
|
|
+ void *buff)
|
|
{
|
|
{
|
|
- vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
|
|
|
|
|
|
+ vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
|
|
}
|
|
}
|
|
|
|
|
|
static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
|
|
static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
|
|
@@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
|
|
vmxnet3_update_interrupt_line_state(s, lidx);
|
|
vmxnet3_update_interrupt_line_state(s, lidx);
|
|
}
|
|
}
|
|
|
|
|
|
-static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
|
|
|
|
|
|
+static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem)
|
|
{
|
|
{
|
|
- return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
|
|
|
|
|
|
+ return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC);
|
|
}
|
|
}
|
|
|
|
|
|
#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
|
|
#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
|
|
@@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
|
|
static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
|
|
static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
|
|
{
|
|
{
|
|
struct Vmxnet3_TxCompDesc txcq_descr;
|
|
struct Vmxnet3_TxCompDesc txcq_descr;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
|
|
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
|
|
|
|
|
|
txcq_descr.txdIdx = tx_ridx;
|
|
txcq_descr.txdIdx = tx_ridx;
|
|
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
|
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
|
|
|
|
|
- vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);
|
|
|
|
|
|
+ vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
|
|
|
|
|
|
/* Flush changes in TX descriptor before changing the counter value */
|
|
/* Flush changes in TX descriptor before changing the counter value */
|
|
smp_wmb();
|
|
smp_wmb();
|
|
@@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s,
|
|
uint32_t *descr_idx)
|
|
uint32_t *descr_idx)
|
|
{
|
|
{
|
|
Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
|
|
Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
- vmxnet3_ring_read_curr_cell(ring, txd);
|
|
|
|
|
|
+ vmxnet3_ring_read_curr_cell(d, ring, txd);
|
|
if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
|
|
if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
|
|
/* Only read after generation field verification */
|
|
/* Only read after generation field verification */
|
|
smp_rmb();
|
|
smp_rmb();
|
|
/* Re-read to be sure we got the latest version */
|
|
/* Re-read to be sure we got the latest version */
|
|
- vmxnet3_ring_read_curr_cell(ring, txd);
|
|
|
|
|
|
+ vmxnet3_ring_read_curr_cell(d, ring, txd);
|
|
VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
|
|
VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
|
|
*descr_idx = vmxnet3_ring_curr_cell_idx(ring);
|
|
*descr_idx = vmxnet3_ring_curr_cell_idx(ring);
|
|
vmxnet3_inc_tx_consumption_counter(s, qidx);
|
|
vmxnet3_inc_tx_consumption_counter(s, qidx);
|
|
@@ -782,9 +787,11 @@ static inline void
|
|
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
|
|
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
|
|
struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
|
|
struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
|
|
{
|
|
{
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
+
|
|
Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
|
|
Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
|
|
*didx = vmxnet3_ring_curr_cell_idx(ring);
|
|
*didx = vmxnet3_ring_curr_cell_idx(ring);
|
|
- vmxnet3_ring_read_curr_cell(ring, dbuf);
|
|
|
|
|
|
+ vmxnet3_ring_read_curr_cell(d, ring, dbuf);
|
|
}
|
|
}
|
|
|
|
|
|
static inline uint8_t
|
|
static inline uint8_t
|
|
@@ -802,9 +809,8 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
|
|
hwaddr daddr =
|
|
hwaddr daddr =
|
|
vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
|
|
vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
|
|
|
|
|
|
- pci_dma_read(PCI_DEVICE(s), daddr,
|
|
|
|
- &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
|
|
|
|
-
|
|
|
|
|
|
+ pci_dma_read(PCI_DEVICE(s),
|
|
|
|
+ daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
|
|
ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
|
|
ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
|
|
|
|
|
|
if (rxcd.gen != ring_gen) {
|
|
if (rxcd.gen != ring_gen) {
|
|
@@ -1058,6 +1064,7 @@ static bool
|
|
vmxnet3_indicate_packet(VMXNET3State *s)
|
|
vmxnet3_indicate_packet(VMXNET3State *s)
|
|
{
|
|
{
|
|
struct Vmxnet3_RxDesc rxd;
|
|
struct Vmxnet3_RxDesc rxd;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
bool is_head = true;
|
|
bool is_head = true;
|
|
uint32_t rxd_idx;
|
|
uint32_t rxd_idx;
|
|
uint32_t rx_ridx = 0;
|
|
uint32_t rx_ridx = 0;
|
|
@@ -1091,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
|
|
}
|
|
}
|
|
|
|
|
|
chunk_size = MIN(bytes_left, rxd.len);
|
|
chunk_size = MIN(bytes_left, rxd.len);
|
|
- vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied,
|
|
|
|
|
|
+ vmxnet3_pci_dma_writev(d, data, bytes_copied,
|
|
le64_to_cpu(rxd.addr), chunk_size);
|
|
le64_to_cpu(rxd.addr), chunk_size);
|
|
bytes_copied += chunk_size;
|
|
bytes_copied += chunk_size;
|
|
bytes_left -= chunk_size;
|
|
bytes_left -= chunk_size;
|
|
@@ -1099,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
|
|
vmxnet3_dump_rx_descr(&rxd);
|
|
vmxnet3_dump_rx_descr(&rxd);
|
|
|
|
|
|
if (ready_rxcd_pa != 0) {
|
|
if (ready_rxcd_pa != 0) {
|
|
- pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
|
|
|
|
|
+ pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
|
}
|
|
}
|
|
|
|
|
|
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
|
|
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
|
|
@@ -1131,7 +1138,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
|
|
rxcd.eop = 1;
|
|
rxcd.eop = 1;
|
|
rxcd.err = (bytes_left != 0);
|
|
rxcd.err = (bytes_left != 0);
|
|
|
|
|
|
- pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
|
|
|
|
|
+ pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
|
|
|
|
|
|
/* Flush RX descriptor changes */
|
|
/* Flush RX descriptor changes */
|
|
smp_wmb();
|
|
smp_wmb();
|
|
@@ -1250,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s)
|
|
|
|
|
|
static void vmxnet3_update_rx_mode(VMXNET3State *s)
|
|
static void vmxnet3_update_rx_mode(VMXNET3State *s)
|
|
{
|
|
{
|
|
- s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
|
|
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
+
|
|
|
|
+ s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
|
|
devRead.rxFilterConf.rxMode);
|
|
devRead.rxFilterConf.rxMode);
|
|
VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
|
|
VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
|
|
}
|
|
}
|
|
@@ -1258,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s)
|
|
static void vmxnet3_update_vlan_filters(VMXNET3State *s)
|
|
static void vmxnet3_update_vlan_filters(VMXNET3State *s)
|
|
{
|
|
{
|
|
int i;
|
|
int i;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
/* Copy configuration from shared memory */
|
|
/* Copy configuration from shared memory */
|
|
- VMXNET3_READ_DRV_SHARED(s->drv_shmem,
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED(d, s->drv_shmem,
|
|
devRead.rxFilterConf.vfTable,
|
|
devRead.rxFilterConf.vfTable,
|
|
s->vlan_table,
|
|
s->vlan_table,
|
|
sizeof(s->vlan_table));
|
|
sizeof(s->vlan_table));
|
|
@@ -1281,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s)
|
|
|
|
|
|
static void vmxnet3_update_mcast_filters(VMXNET3State *s)
|
|
static void vmxnet3_update_mcast_filters(VMXNET3State *s)
|
|
{
|
|
{
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
+
|
|
uint16_t list_bytes =
|
|
uint16_t list_bytes =
|
|
- VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem,
|
|
devRead.rxFilterConf.mfTableLen);
|
|
devRead.rxFilterConf.mfTableLen);
|
|
|
|
|
|
s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
|
|
s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
|
|
@@ -1299,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
|
|
} else {
|
|
} else {
|
|
int i;
|
|
int i;
|
|
hwaddr mcast_list_pa =
|
|
hwaddr mcast_list_pa =
|
|
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem,
|
|
devRead.rxFilterConf.mfTablePA);
|
|
devRead.rxFilterConf.mfTablePA);
|
|
|
|
|
|
- pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes);
|
|
|
|
|
|
+ pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes);
|
|
|
|
|
|
VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
|
|
VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
|
|
for (i = 0; i < s->mcast_list_len; i++) {
|
|
for (i = 0; i < s->mcast_list_len; i++) {
|
|
@@ -1328,19 +1340,20 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
|
|
static void vmxnet3_fill_stats(VMXNET3State *s)
|
|
static void vmxnet3_fill_stats(VMXNET3State *s)
|
|
{
|
|
{
|
|
int i;
|
|
int i;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
if (!s->device_active)
|
|
if (!s->device_active)
|
|
return;
|
|
return;
|
|
|
|
|
|
for (i = 0; i < s->txq_num; i++) {
|
|
for (i = 0; i < s->txq_num; i++) {
|
|
- pci_dma_write(PCI_DEVICE(s),
|
|
|
|
|
|
+ pci_dma_write(d,
|
|
s->txq_descr[i].tx_stats_pa,
|
|
s->txq_descr[i].tx_stats_pa,
|
|
&s->txq_descr[i].txq_stats,
|
|
&s->txq_descr[i].txq_stats,
|
|
sizeof(s->txq_descr[i].txq_stats));
|
|
sizeof(s->txq_descr[i].txq_stats));
|
|
}
|
|
}
|
|
|
|
|
|
for (i = 0; i < s->rxq_num; i++) {
|
|
for (i = 0; i < s->rxq_num; i++) {
|
|
- pci_dma_write(PCI_DEVICE(s),
|
|
|
|
|
|
+ pci_dma_write(d,
|
|
s->rxq_descr[i].rx_stats_pa,
|
|
s->rxq_descr[i].rx_stats_pa,
|
|
&s->rxq_descr[i].rxq_stats,
|
|
&s->rxq_descr[i].rxq_stats,
|
|
sizeof(s->rxq_descr[i].rxq_stats));
|
|
sizeof(s->rxq_descr[i].rxq_stats));
|
|
@@ -1350,8 +1363,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
|
|
static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
|
|
static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
|
|
{
|
|
{
|
|
struct Vmxnet3_GOSInfo gos;
|
|
struct Vmxnet3_GOSInfo gos;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
- VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos,
|
|
&gos, sizeof(gos));
|
|
&gos, sizeof(gos));
|
|
s->rx_packets_compound =
|
|
s->rx_packets_compound =
|
|
(gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
|
|
(gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
|
|
@@ -1371,13 +1385,14 @@ vmxnet3_dump_conf_descr(const char *name,
|
|
static void vmxnet3_update_pm_state(VMXNET3State *s)
|
|
static void vmxnet3_update_pm_state(VMXNET3State *s)
|
|
{
|
|
{
|
|
struct Vmxnet3_VariableLenConfDesc pm_descr;
|
|
struct Vmxnet3_VariableLenConfDesc pm_descr;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
pm_descr.confLen =
|
|
pm_descr.confLen =
|
|
- VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen);
|
|
pm_descr.confVer =
|
|
pm_descr.confVer =
|
|
- VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer);
|
|
pm_descr.confPA =
|
|
pm_descr.confPA =
|
|
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA);
|
|
|
|
|
|
vmxnet3_dump_conf_descr("PM State", &pm_descr);
|
|
vmxnet3_dump_conf_descr("PM State", &pm_descr);
|
|
}
|
|
}
|
|
@@ -1386,8 +1401,9 @@ static void vmxnet3_update_features(VMXNET3State *s)
|
|
{
|
|
{
|
|
uint32_t guest_features;
|
|
uint32_t guest_features;
|
|
int rxcso_supported;
|
|
int rxcso_supported;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
- guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
|
|
|
|
|
|
+ guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
|
|
devRead.misc.uptFeatures);
|
|
devRead.misc.uptFeatures);
|
|
|
|
|
|
rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
|
|
rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
|
|
@@ -1462,12 +1478,13 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
{
|
|
{
|
|
int i;
|
|
int i;
|
|
static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
|
|
static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
hwaddr qdescr_table_pa;
|
|
hwaddr qdescr_table_pa;
|
|
uint64_t pa;
|
|
uint64_t pa;
|
|
uint32_t size;
|
|
uint32_t size;
|
|
|
|
|
|
/* Verify configuration consistency */
|
|
/* Verify configuration consistency */
|
|
- if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
|
|
|
|
|
|
+ if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) {
|
|
VMW_ERPRN("Device configuration received from driver is invalid");
|
|
VMW_ERPRN("Device configuration received from driver is invalid");
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
@@ -1483,11 +1500,11 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
vmxnet3_update_pm_state(s);
|
|
vmxnet3_update_pm_state(s);
|
|
vmxnet3_setup_rx_filtering(s);
|
|
vmxnet3_setup_rx_filtering(s);
|
|
/* Cache fields from shared memory */
|
|
/* Cache fields from shared memory */
|
|
- s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
|
|
|
|
|
|
+ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
|
|
VMW_CFPRN("MTU is %u", s->mtu);
|
|
VMW_CFPRN("MTU is %u", s->mtu);
|
|
|
|
|
|
s->max_rx_frags =
|
|
s->max_rx_frags =
|
|
- VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG);
|
|
|
|
|
|
if (s->max_rx_frags == 0) {
|
|
if (s->max_rx_frags == 0) {
|
|
s->max_rx_frags = 1;
|
|
s->max_rx_frags = 1;
|
|
@@ -1496,24 +1513,24 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
|
|
VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
|
|
|
|
|
|
s->event_int_idx =
|
|
s->event_int_idx =
|
|
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx);
|
|
assert(vmxnet3_verify_intx(s, s->event_int_idx));
|
|
assert(vmxnet3_verify_intx(s, s->event_int_idx));
|
|
VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
|
|
VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
|
|
|
|
|
|
s->auto_int_masking =
|
|
s->auto_int_masking =
|
|
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask);
|
|
VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
|
|
VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
|
|
|
|
|
|
s->txq_num =
|
|
s->txq_num =
|
|
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues);
|
|
s->rxq_num =
|
|
s->rxq_num =
|
|
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues);
|
|
|
|
|
|
VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
|
|
VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
|
|
vmxnet3_validate_queues(s);
|
|
vmxnet3_validate_queues(s);
|
|
|
|
|
|
qdescr_table_pa =
|
|
qdescr_table_pa =
|
|
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
|
|
|
|
|
|
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA);
|
|
VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);
|
|
VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -1529,25 +1546,25 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
|
|
|
|
/* Read interrupt number for this TX queue */
|
|
/* Read interrupt number for this TX queue */
|
|
s->txq_descr[i].intr_idx =
|
|
s->txq_descr[i].intr_idx =
|
|
- VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
|
|
|
|
|
|
+ VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx);
|
|
assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
|
|
assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
|
|
|
|
|
|
VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
|
|
VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
|
|
|
|
|
|
/* Read rings memory locations for TX queues */
|
|
/* Read rings memory locations for TX queues */
|
|
- pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
|
|
|
|
- size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);
|
|
|
|
|
|
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
|
|
|
|
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
|
|
|
|
|
|
- vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
|
|
|
|
|
|
+ vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
|
|
sizeof(struct Vmxnet3_TxDesc), false);
|
|
sizeof(struct Vmxnet3_TxDesc), false);
|
|
VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
|
|
VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
|
|
|
|
|
|
s->max_tx_frags += size;
|
|
s->max_tx_frags += size;
|
|
|
|
|
|
/* TXC ring */
|
|
/* TXC ring */
|
|
- pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
|
|
|
|
- size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
|
|
|
|
- vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
|
|
|
|
|
|
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
|
|
|
|
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
|
|
|
|
+ vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
|
|
sizeof(struct Vmxnet3_TxCompDesc), true);
|
|
sizeof(struct Vmxnet3_TxCompDesc), true);
|
|
VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
|
|
VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
|
|
|
|
|
|
@@ -1558,7 +1575,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
sizeof(s->txq_descr[i].txq_stats));
|
|
sizeof(s->txq_descr[i].txq_stats));
|
|
|
|
|
|
/* Fill device-managed parameters for queues */
|
|
/* Fill device-managed parameters for queues */
|
|
- VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
|
|
|
|
|
|
+ VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa,
|
|
ctrl.txThreshold,
|
|
ctrl.txThreshold,
|
|
VMXNET3_DEF_TX_THRESHOLD);
|
|
VMXNET3_DEF_TX_THRESHOLD);
|
|
}
|
|
}
|
|
@@ -1578,7 +1595,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
|
|
|
|
/* Read interrupt number for this RX queue */
|
|
/* Read interrupt number for this RX queue */
|
|
s->rxq_descr[i].intr_idx =
|
|
s->rxq_descr[i].intr_idx =
|
|
- VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
|
|
|
|
|
|
+ VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx);
|
|
assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
|
|
assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
|
|
|
|
|
|
VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
|
|
VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
|
|
@@ -1586,18 +1603,18 @@ static void vmxnet3_activate_device(VMXNET3State *s)
|
|
/* Read rings memory locations */
|
|
/* Read rings memory locations */
|
|
for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
|
|
for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
|
|
/* RX rings */
|
|
/* RX rings */
|
|
- pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
|
|
|
|
- size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
|
|
|
|
- vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
|
|
|
|
|
|
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
|
|
|
|
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
|
|
|
|
+ vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
|
|
sizeof(struct Vmxnet3_RxDesc), false);
|
|
sizeof(struct Vmxnet3_RxDesc), false);
|
|
VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
|
|
VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
|
|
i, j, pa, size);
|
|
i, j, pa, size);
|
|
}
|
|
}
|
|
|
|
|
|
/* RXC ring */
|
|
/* RXC ring */
|
|
- pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
|
|
|
|
- size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
|
|
|
|
- vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
|
|
|
|
|
|
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
|
|
|
|
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
|
|
|
|
+ vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
|
|
sizeof(struct Vmxnet3_RxCompDesc), true);
|
|
sizeof(struct Vmxnet3_RxCompDesc), true);
|
|
VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
|
|
VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
|
|
|
|
|
|
@@ -1764,19 +1781,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
|
|
static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
|
|
static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
|
|
{
|
|
{
|
|
uint32_t events;
|
|
uint32_t events;
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
|
|
|
|
VMW_CBPRN("Setting events: 0x%x", val);
|
|
VMW_CBPRN("Setting events: 0x%x", val);
|
|
- events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
|
|
|
|
- VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
|
|
|
|
|
|
+ events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val;
|
|
|
|
+ VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
|
|
}
|
|
}
|
|
|
|
|
|
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
|
|
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
|
|
{
|
|
{
|
|
|
|
+ PCIDevice *d = PCI_DEVICE(s);
|
|
uint32_t events;
|
|
uint32_t events;
|
|
|
|
|
|
VMW_CBPRN("Clearing events: 0x%x", val);
|
|
VMW_CBPRN("Clearing events: 0x%x", val);
|
|
- events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
|
|
|
|
- VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
|
|
|
|
|
|
+ events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
|
|
|
|
+ VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
|
|
}
|
|
}
|
|
|
|
|
|
static void
|
|
static void
|