@@ -27,6 +27,10 @@
 #include "hw/net/mii.h"
 #include "hw/net/npcm_gmac.h"
 #include "migration/vmstate.h"
+#include "net/checksum.h"
+#include "net/eth.h"
+#include "net/net.h"
+#include "qemu/cutils.h"
 #include "qemu/log.h"
 #include "qemu/units.h"
 #include "sysemu/dma.h"
@@ -149,6 +153,17 @@ static void gmac_phy_set_link(NPCMGMACState *gmac, bool active)
 
 static bool gmac_can_receive(NetClientState *nc)
 {
+    NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
+
+    /* If GMAC receive is disabled. */
+    if (!(gmac->regs[R_NPCM_GMAC_MAC_CONFIG] & NPCM_GMAC_MAC_CONFIG_RX_EN)) {
+        return false;
+    }
+
+    /* If GMAC DMA RX is stopped. */
+    if (!(gmac->regs[R_NPCM_DMA_CONTROL] & NPCM_DMA_CONTROL_START_STOP_RX)) {
+        return false;
+    }
     return true;
 }
 
@@ -192,12 +207,258 @@ static void gmac_update_irq(NPCMGMACState *gmac)
     qemu_set_irq(gmac->irq, level);
 }
 
-static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len)
+static int gmac_read_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
+{
+    if (dma_memory_read(&address_space_memory, addr, desc,
+                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
+                      HWADDR_PRIx "\n", __func__, addr);
+        return -1;
+    }
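+    /* Descriptor fields are little-endian in guest memory. */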
+    desc->rdes0 = le32_to_cpu(desc->rdes0);
+    desc->rdes1 = le32_to_cpu(desc->rdes1);
+    desc->rdes2 = le32_to_cpu(desc->rdes2);
+    desc->rdes3 = le32_to_cpu(desc->rdes3);
+    return 0;
+}
+
+static int gmac_write_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
 {
-    /* Placeholder. Function will be filled in following patches */
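+    /* Convert back to little-endian before writing to guest memory. */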
+    struct NPCMGMACRxDesc le_desc;
+    le_desc.rdes0 = cpu_to_le32(desc->rdes0);
+    le_desc.rdes1 = cpu_to_le32(desc->rdes1);
+    le_desc.rdes2 = cpu_to_le32(desc->rdes2);
+    le_desc.rdes3 = cpu_to_le32(desc->rdes3);
+    if (dma_memory_write(&address_space_memory, addr, &le_desc,
+                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
+                      HWADDR_PRIx "\n", __func__, addr);
+        return -1;
+    }
     return 0;
 }
 
+static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len,
+                                            uint32_t *left_frame,
+                                            uint32_t rx_buf_addr,
+                                            bool *eof_transferred,
+                                            const uint8_t **frame_ptr,
+                                            uint16_t *transferred)
+{
+    uint32_t to_transfer;
+    /*
+     * If the buffer can hold what is left of the frame, transfer only the
+     * remainder and mark the end of frame; otherwise fill the whole buffer.
+     */
+    if (rx_buf_len >= *left_frame) {
+        to_transfer = *left_frame;
+        *eof_transferred = true;
+    } else {
+        to_transfer = rx_buf_len;
+    }
+
+    /* Write this part of the frame to guest memory. */
+    if (dma_memory_write(&address_space_memory, (uint64_t) rx_buf_addr,
+                         *frame_ptr, to_transfer, MEMTXATTRS_UNSPECIFIED)) {
+        return -1;
+    }
+
+    /* Update the frame pointer and how much of the frame is left. */
+    *frame_ptr += to_transfer;
+    *left_frame -= to_transfer;
+    *transferred += to_transfer;
+
+    return 0;
+}
+
+static void gmac_dma_set_state(NPCMGMACState *gmac, int shift, uint32_t state)
+{
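+    /* Each DMA process state is a 3-bit field in the DMA status register. */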
+    gmac->regs[R_NPCM_DMA_STATUS] = deposit32(gmac->regs[R_NPCM_DMA_STATUS],
+                                              shift, 3, state);
+}
+
+static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len)
+{
+    /*
+     * The "step N" comments below follow the receive process steps
+     * described on page 386.
+     */
+    NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
+    uint32_t left_frame = len;
+    const uint8_t *frame_ptr = buf;
+    uint32_t desc_addr;
+    uint32_t rx_buf_len, rx_buf_addr;
+    struct NPCMGMACRxDesc rx_desc;
+    uint16_t transferred = 0;
+    bool eof_transferred = false;
+
+    trace_npcm_gmac_packet_receive(DEVICE(gmac)->canonical_path, len);
+    if (!gmac_can_receive(nc)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "GMAC unable to receive packet\n");
+        return -1;
+    }
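+    /* Latch the descriptor list head on first use. */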
+    if (!gmac->regs[R_NPCM_DMA_HOST_RX_DESC]) {
+        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] =
+            NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]);
+    }
+    desc_addr = NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_HOST_RX_DESC]);
+
+    /* step 1 */
+    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                       NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE);
+    trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, desc_addr);
+    if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "RX Descriptor @ 0x%x can't be read\n",
+                      desc_addr);
+        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                           NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
+        return -1;
+    }
+
+    /* step 2 */
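+    /* Descriptor is still owned by software: flag RU and suspend RX. */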
+    if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "RX Descriptor @ 0x%x is owned by software\n",
+                      desc_addr);
+        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
+        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
+        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                           NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
+        gmac_update_irq(gmac);
+        return len;
+    }
+    /* step 3 */
+    /*
+     * TODO --
+     * Implement all frame filtering and processing (with its own interrupts)
+     */
+    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
+                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
+                                    rx_desc.rdes3);
+    /* Clear rdes0 and set FS (first descriptor) for the incoming frame. */
+    rx_desc.rdes0 = RX_DESC_RDES0_FIRST_DESC_MASK;
+
+    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                       NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE);
+
+    /* Pad the frame with FCS as the kernel driver will strip it away. */
+    left_frame += ETH_FCS_LEN;
+
+    /* Repeat until the whole frame has been transferred to memory. */
+    while (!eof_transferred) {
+        /* Return the descriptor to software no matter what happens. */
+        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
+        /* Set the frame to be an IPv4/IPv6 frame. */
+        rx_desc.rdes0 |= RX_DESC_RDES0_FRM_TYPE_MASK;
+
+        /* step 4 */
+        rx_buf_len = RX_DESC_RDES1_BFFR1_SZ_MASK(rx_desc.rdes1);
+        rx_buf_addr = rx_desc.rdes2;
+        gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
+        gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, rx_buf_addr,
+                                         &eof_transferred, &frame_ptr,
+                                         &transferred);
+
+        trace_npcm_gmac_packet_receiving_buffer(DEVICE(gmac)->canonical_path,
+                                                rx_buf_len, rx_buf_addr);
+        /* If frame data remains and the second buffer is not chained. */
+        if (!(rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) &&
+            !eof_transferred) {
+            /* Repeat the process above on buffer 2. */
+            rx_buf_len = RX_DESC_RDES1_BFFR2_SZ_MASK(rx_desc.rdes1);
+            rx_buf_addr = rx_desc.rdes3;
+            gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
+            gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame,
+                                             rx_buf_addr, &eof_transferred,
+                                             &frame_ptr, &transferred);
+            trace_npcm_gmac_packet_receiving_buffer(
+                DEVICE(gmac)->canonical_path,
+                rx_buf_len, rx_buf_addr);
+        }
+        /* Update the current descriptor address. */
+        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = rx_buf_addr;
+        /* Return the descriptor. */
+        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
+        /* Update the frame length transferred so far. */
+        rx_desc.rdes0 |= ((uint32_t)transferred)
+                         << RX_DESC_RDES0_FRAME_LEN_SHIFT;
+        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
+                                        rx_desc.rdes0, rx_desc.rdes1,
+                                        rx_desc.rdes2, rx_desc.rdes3);
+
+        /* step 5 */
+        gmac_write_rx_desc(desc_addr, &rx_desc);
+        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path,
+                                        &rx_desc, rx_desc.rdes0,
+                                        rx_desc.rdes1, rx_desc.rdes2,
+                                        rx_desc.rdes3);
+        /* Read the next descriptor into rx_desc if needed. */
+        if (!eof_transferred) {
+            /* Get next descriptor address (ring end, chained or sequential) */
+            if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
+                desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
+            } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
+                desc_addr = rx_desc.rdes3;
+            } else {
+                desc_addr += sizeof(rx_desc);
+            }
+            trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
+                                             desc_addr);
+            if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "RX Descriptor @ 0x%x can't be read\n",
+                              desc_addr);
+                gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
+                gmac_update_irq(gmac);
+                return len;
+            }
+
+            /* step 6 */
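+            /*
+             * If software still owns the next descriptor, the frame cannot
+             * continue; flag a descriptor error unless RX is being flushed.
+             */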
+            if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
+                if (!(gmac->regs[R_NPCM_DMA_CONTROL] &
+                      NPCM_DMA_CONTROL_FLUSH_MASK)) {
+                    rx_desc.rdes0 |= RX_DESC_RDES0_DESC_ERR_MASK;
+                }
+                eof_transferred = true;
+            }
+            /* Clear rdes0 for the incoming descriptor. */
+            rx_desc.rdes0 = 0;
+        }
+    }
+    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                       NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE);
+
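+    /* Mark the frame's last descriptor; raise RI unless suppressed. */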
+    rx_desc.rdes0 |= RX_DESC_RDES0_LAST_DESC_MASK;
+    if (!(rx_desc.rdes1 & RX_DESC_RDES1_DIS_INTR_COMP_MASK)) {
+        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
+        gmac_update_irq(gmac);
+    }
+    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
+                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
+                                    rx_desc.rdes3);
+
+    /* step 8 */
+    gmac->regs[R_NPCM_DMA_CONTROL] |= NPCM_DMA_CONTROL_FLUSH_MASK;
+
+    /* step 9 */
+    trace_npcm_gmac_packet_received(DEVICE(gmac)->canonical_path, left_frame);
+    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                       NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
+    gmac_write_rx_desc(desc_addr, &rx_desc);
+
+    /* Get next descriptor address (ring end, chained or sequential) */
+    if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
+        desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
+    } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
+        desc_addr = rx_desc.rdes3;
+    } else {
+        desc_addr += sizeof(rx_desc);
+    }
+    gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr;
+    return len;
+}
+
 static void gmac_cleanup(NetClientState *nc)
 {
     /* Nothing to do yet. */
@@ -306,6 +567,7 @@ static void npcm_gmac_write(void *opaque, hwaddr offset,
         break;
 
     case A_NPCM_GMAC_MAC_CONFIG:
+        gmac->regs[offset / sizeof(uint32_t)] = v;
         break;
 
     case A_NPCM_GMAC_MII_ADDR:
@@ -347,6 +609,8 @@ static void npcm_gmac_write(void *opaque, hwaddr offset,
 
     case A_NPCM_DMA_RCV_POLL_DEMAND:
         /* We don't actually care about the value */
+        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                           NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
         break;
 
     case A_NPCM_DMA_STATUS:
@@ -357,6 +621,14 @@ static void npcm_gmac_write(void *opaque, hwaddr offset,
                           HWADDR_PRIx ", value: 0x%04" PRIx64 "\n",
                           DEVICE(gmac)->canonical_path, offset, v);
         }
+        /* Implement write-1-to-clear (W1C) for the W1C status bits. */
+        gmac->regs[offset / sizeof(uint32_t)] &= ~NPCM_DMA_STATUS_W1C_MASK(v);
+        if (v & NPCM_DMA_STATUS_RU) {
+            /* Clearing RU indicates the descriptor is owned by DMA again. */
+            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
+                               NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
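+            /* Flush packets queued while no RX descriptor was available. */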
+            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
+        }
         break;
 
     default: