Diffstat (limited to 'drivers/staging/et131x/et1310_tx.c')
 -rw-r--r--  drivers/staging/et131x/et1310_tx.c  622
 1 file changed, 248 insertions(+), 374 deletions(-)
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index 94f7752e2ccc..977e8b34e7a6 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -94,13 +94,11 @@
 #include "et1310_tx.h"
 
-static void et131x_update_tcb_list(struct et131x_adapter *etdev);
-static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-                                           PMP_TCB pMpTcb);
+                                           struct tcb *tcb);
 static int et131x_send_packet(struct sk_buff *skb,
                               struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
 
 /**
  * et131x_tx_dma_memory_alloc
@@ -117,12 +115,12 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 {
     int desc_size = 0;
-    TX_RING_t *tx_ring = &adapter->TxRing;
+    struct tx_ring *tx_ring = &adapter->tx_ring;
 
     /* Allocate memory for the TCB's (Transmit Control Block) */
-    adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
-                                                 GFP_ATOMIC | GFP_DMA);
-    if (!adapter->TxRing.MpTcbMem) {
+    adapter->tx_ring.tcb_ring = (struct tcb *)
+        kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
+    if (!adapter->tx_ring.tcb_ring) {
         dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
         return -ENOMEM;
     }
@@ -130,12 +128,13 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
     /* Allocate enough memory for the Tx descriptor ring, and allocate
      * some extra so that the ring can be aligned on a 4k boundary.
      */
-    desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
-    tx_ring->pTxDescRingVa =
-        (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
-                                                &tx_ring->pTxDescRingPa);
-    if (!adapter->TxRing.pTxDescRingVa) {
-        dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
+    desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+    tx_ring->tx_desc_ring =
+        (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
+                                                &tx_ring->tx_desc_ring_pa);
+    if (!adapter->tx_ring.tx_desc_ring) {
+        dev_err(&adapter->pdev->dev,
+                "Cannot alloc memory for Tx Ring\n");
         return -ENOMEM;
     }
 
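The descriptor ring is still over-allocated by 4096 - 1 bytes of slack, but the explicit alignment step that used the slack (et131x_align_allocated_memory(), removed in the next hunk) is gone: pci_alloc_consistent() hands back page-aligned memory, which already satisfies the 4 KiB requirement. For reference, a minimal sketch of the manual round-up such a helper performs; the names here are hypothetical, not driver code:

#include <stdint.h>

#define RING_ALIGN 0x1000u	/* 4 KiB boundary */

/* Round a raw bus address up to the next 4 KiB boundary and report the
 * byte offset so the CPU-side virtual pointer can be bumped to match. */
static uintptr_t align_ring_addr(uintptr_t raw, uint32_t *offset)
{
    uintptr_t aligned = (raw + RING_ALIGN - 1) & ~(uintptr_t)(RING_ALIGN - 1);

    *offset = (uint32_t)(aligned - raw);
    return aligned;
}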
@@ -146,35 +145,15 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
      * are ever returned, make sure the high part is retrieved here before
      * storing the adjusted address.
      */
-    tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
-
-    /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
-    et131x_align_allocated_memory(adapter,
-                                  &tx_ring->pTxDescRingAdjustedPa,
-                                  &tx_ring->TxDescOffset, 0x0FFF);
-
-    tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
-
     /* Allocate memory for the Tx status block */
-    tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
-                                                sizeof(TX_STATUS_BLOCK_t),
-                                                &tx_ring->pTxStatusPa);
-    if (!adapter->TxRing.pTxStatusPa) {
+    tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
+                                              sizeof(u32),
+                                              &tx_ring->tx_status_pa);
+    if (!adapter->tx_ring.tx_status_pa) {
         dev_err(&adapter->pdev->dev,
                 "Cannot alloc memory for Tx status block\n");
         return -ENOMEM;
     }
-
-    /* Allocate memory for a dummy buffer */
-    tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
-                                                  NIC_MIN_PACKET_SIZE,
-                                                  &tx_ring->pTxDummyBlkPa);
-    if (!adapter->TxRing.pTxDummyBlkPa) {
-        dev_err(&adapter->pdev->dev,
-                "Cannot alloc memory for Tx dummy buffer\n");
-        return -ENOMEM;
-    }
-
     return 0;
 }
 
@@ -188,76 +167,59 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 {
     int desc_size = 0;
 
-    if (adapter->TxRing.pTxDescRingVa) {
+    if (adapter->tx_ring.tx_desc_ring) {
         /* Free memory relating to Tx rings here */
-        adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
-
-        desc_size =
-            (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
-
+        desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
+                                                        + 4096 - 1;
         pci_free_consistent(adapter->pdev,
                             desc_size,
-                            adapter->TxRing.pTxDescRingVa,
-                            adapter->TxRing.pTxDescRingPa);
-
-        adapter->TxRing.pTxDescRingVa = NULL;
+                            adapter->tx_ring.tx_desc_ring,
+                            adapter->tx_ring.tx_desc_ring_pa);
+        adapter->tx_ring.tx_desc_ring = NULL;
     }
 
     /* Free memory for the Tx status block */
-    if (adapter->TxRing.pTxStatusVa) {
-        pci_free_consistent(adapter->pdev,
-                            sizeof(TX_STATUS_BLOCK_t),
-                            adapter->TxRing.pTxStatusVa,
-                            adapter->TxRing.pTxStatusPa);
-
-        adapter->TxRing.pTxStatusVa = NULL;
-    }
-
-    /* Free memory for the dummy buffer */
-    if (adapter->TxRing.pTxDummyBlkVa) {
+    if (adapter->tx_ring.tx_status) {
         pci_free_consistent(adapter->pdev,
-                            NIC_MIN_PACKET_SIZE,
-                            adapter->TxRing.pTxDummyBlkVa,
-                            adapter->TxRing.pTxDummyBlkPa);
+                            sizeof(u32),
+                            adapter->tx_ring.tx_status,
+                            adapter->tx_ring.tx_status_pa);
 
-        adapter->TxRing.pTxDummyBlkVa = NULL;
+        adapter->tx_ring.tx_status = NULL;
     }
-
-    /* Free the memory for MP_TCB structures */
-    kfree(adapter->TxRing.MpTcbMem);
+    /* Free the memory for the tcb structures */
+    kfree(adapter->tx_ring.tcb_ring);
 }
 
 /**
  * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
  * @etdev: pointer to our private adapter structure
+ *
+ * Configure the transmit engine with the ring buffers we have created
+ * and prepare it for use.
  */
 void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 {
     struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;
 
     /* Load the hardware with the start of the transmit descriptor ring.
      */
-    writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
+    writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
            &txdma->pr_base_hi);
-    writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
+    writel((u32) etdev->tx_ring.tx_desc_ring_pa,
            &txdma->pr_base_lo);
 
     /* Initialise the transmit DMA engine */
-    writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);
+    writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
 
-    /* Load the completion writeback physical address
-     *
-     * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
-     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
-     * are ever returned, make sure the high part is retrieved here before
-     * storing the adjusted address.
-     */
-    writel(0, &txdma->dma_wb_base_hi);
-    writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);
+    /* Load the completion writeback physical address */
+    writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
+           &txdma->dma_wb_base_hi);
+    writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
 
-    memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+    *etdev->tx_ring.tx_status = 0;
 
     writel(0, &txdma->service_request);
-    etdev->TxRing.txDmaReadyToSend = 0;
+    etdev->tx_ring.send_idx = 0;
 }
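ConfigTxDmaRegs() now programs both the ring base and the completion writeback address as a 64-bit value split across a hi/lo pair of 32-bit registers, instead of hard-wiring the writeback high half to zero as the old code did. A minimal sketch of that pattern; the register arguments are hypothetical, not the driver's:

#include <linux/io.h>
#include <linux/types.h>

/* Program a 64-bit bus address into a hi/lo pair of 32-bit registers. */
static void write_dma_addr64(u64 pa, void __iomem *hi, void __iomem *lo)
{
    writel((u32)(pa >> 32), hi);	/* upper 32 bits */
    writel((u32)pa, lo);		/* lower 32 bits */
}

On this hardware pci_alloc_consistent() returns 32-bit addresses anyway, so the high write is zero in practice; the cast through u64 simply makes the shift well-defined regardless of the width of dma_addr_t.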
 
 /**
@@ -279,16 +241,11 @@ void et131x_tx_dma_disable(struct et131x_adapter *etdev)
  */
 void et131x_tx_dma_enable(struct et131x_adapter *etdev)
 {
-    u32 csr = ET_TXDMA_SNGL_EPKT;
-    if (etdev->RegistryPhyLoopbk)
-        /* TxDMA is disabled for loopback operation. */
-        csr |= ET_TXDMA_CSR_HALT;
-    else
-        /* Setup the transmit dma configuration register for normal
-         * operation
-         */
-        csr |= PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT;
-    writel(csr, &etdev->regs->txdma.csr);
+    /* Setup the transmit dma configuration register for normal
+     * operation
+     */
+    writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
+           &etdev->regs->txdma.csr);
 }
 
 /**
@@ -297,39 +254,32 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
  */
 void et131x_init_send(struct et131x_adapter *adapter)
 {
-    PMP_TCB pMpTcb;
-    uint32_t TcbCount;
-    TX_RING_t *tx_ring;
+    struct tcb *tcb;
+    u32 ct;
+    struct tx_ring *tx_ring;
 
     /* Setup some convenience pointers */
-    tx_ring = &adapter->TxRing;
-    pMpTcb = adapter->TxRing.MpTcbMem;
+    tx_ring = &adapter->tx_ring;
+    tcb = adapter->tx_ring.tcb_ring;
 
-    tx_ring->TCBReadyQueueHead = pMpTcb;
+    tx_ring->tcb_qhead = tcb;
 
-    /* Go through and set up each TCB */
-    for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
-        memset(pMpTcb, 0, sizeof(MP_TCB));
+    memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
 
+    /* Go through and set up each TCB */
+    for (ct = 0; ct++ < NUM_TCB; tcb++)
         /* Set the link pointer in HW TCB to the next TCB in the
-         * chain. If this is the last TCB in the chain, also set the
-         * tail pointer.
+         * chain
          */
-        if (TcbCount < NUM_TCB - 1) {
-            pMpTcb->Next = pMpTcb + 1;
-        } else {
-            tx_ring->TCBReadyQueueTail = pMpTcb;
-            pMpTcb->Next = (PMP_TCB) NULL;
-        }
-
-        pMpTcb++;
-    }
+        tcb->next = tcb + 1;
+
+    /* Set the tail pointer */
+    tcb--;
+    tx_ring->tcb_qtail = tcb;
+    tcb->next = NULL;
 
     /* Curr send queue should now be empty */
-    tx_ring->CurrSendHead = (PMP_TCB) NULL;
-    tx_ring->CurrSendTail = (PMP_TCB) NULL;
-
-    INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
+    tx_ring->send_head = NULL;
+    tx_ring->send_tail = NULL;
 }
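The rewritten et131x_init_send() replaces the per-element if/else with a single pass that links every TCB to its neighbour, then steps back one element to terminate the list and record the tail; the one-past-the-end link written by the last iteration is overwritten by the NULL. A standalone sketch of the same pattern, with hypothetical names:

struct block {
    struct block *next;
};

/* Chain an array of n blocks (n >= 1) into a singly linked free list,
 * mirroring the one-pass loop in et131x_init_send(). */
static void init_free_list(struct block *blk, unsigned int n,
                           struct block **head, struct block **tail)
{
    unsigned int i;

    *head = blk;
    for (i = 0; i < n; i++, blk++)
        blk->next = blk + 1;	/* link each block to its neighbour */

    blk--;			/* step back to the last block */
    blk->next = NULL;		/* terminate the chain */
    *tail = blk;
}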
 
 /**
@@ -352,9 +302,8 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
      * to Tx, so the PacketCount and it's array used makes no sense here
      */
 
-    /* Queue is not empty or TCB is not available */
-    if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
-        MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
+    /* TCB is not available */
+    if (etdev->tx_ring.used >= NUM_TCB) {
         /* NOTE: If there's an error on send, no need to queue the
          * packet under Linux; if we just send an error up to the
          * netif layer, it will resend the skb to us.
@@ -364,27 +313,15 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
         /* We need to see if the link is up; if it's not, make the
          * netif layer think we're good and drop the packet
          */
-        /*
-         * if( MP_SHOULD_FAIL_SEND( etdev ) ||
-         *  etdev->DriverNoPhyAccess )
-         */
-        if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
-            || !netif_carrier_ok(netdev)) {
+        if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+                    !netif_carrier_ok(netdev)) {
             dev_kfree_skb_any(skb);
             skb = NULL;
 
             etdev->net_stats.tx_dropped++;
         } else {
             status = et131x_send_packet(skb, etdev);
-
-            if (status == -ENOMEM) {
-
-                /* NOTE: If there's an error on send, no need
-                 * to queue the packet under Linux; if we just
-                 * send an error up to the netif layer, it
-                 * will resend the skb to us.
-                 */
-            } else if (status != 0) {
+            if (status != 0 && status != -ENOMEM) {
                 /* On any other error, make netif think we're
                  * OK and drop the packet
                  */
@@ -409,87 +346,83 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 static int et131x_send_packet(struct sk_buff *skb,
                               struct et131x_adapter *etdev)
 {
-    int status = 0;
-    PMP_TCB pMpTcb = NULL;
-    uint16_t *shbufva;
+    int status;
+    struct tcb *tcb = NULL;
+    u16 *shbufva;
     unsigned long flags;
 
     /* All packets must have at least a MAC address and a protocol type */
-    if (skb->len < ETH_HLEN) {
+    if (skb->len < ETH_HLEN)
         return -EIO;
-    }
 
     /* Get a TCB for this packet */
     spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-    pMpTcb = etdev->TxRing.TCBReadyQueueHead;
+    tcb = etdev->tx_ring.tcb_qhead;
 
-    if (pMpTcb == NULL) {
+    if (tcb == NULL) {
         spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
         return -ENOMEM;
     }
 
-    etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;
+    etdev->tx_ring.tcb_qhead = tcb->next;
 
-    if (etdev->TxRing.TCBReadyQueueHead == NULL)
-        etdev->TxRing.TCBReadyQueueTail = NULL;
+    if (etdev->tx_ring.tcb_qhead == NULL)
+        etdev->tx_ring.tcb_qtail = NULL;
 
     spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
-    pMpTcb->PacketLength = skb->len;
-    pMpTcb->Packet = skb;
+    tcb->skb = skb;
 
-    if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
-        shbufva = (uint16_t *) skb->data;
+    if (skb->data != NULL && skb->len - skb->data_len >= 6) {
+        shbufva = (u16 *) skb->data;
 
         if ((shbufva[0] == 0xffff) &&
             (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-            pMpTcb->Flags |= fMP_DEST_BROAD;
+            tcb->flags |= fMP_DEST_BROAD;
         } else if ((shbufva[0] & 0x3) == 0x0001) {
-            pMpTcb->Flags |= fMP_DEST_MULTI;
+            tcb->flags |= fMP_DEST_MULTI;
         }
     }
 
-    pMpTcb->Next = NULL;
+    tcb->next = NULL;
 
     /* Call the NIC specific send handler. */
-    if (status == 0)
-        status = nic_send_packet(etdev, pMpTcb);
+    status = nic_send_packet(etdev, tcb);
 
     if (status != 0) {
         spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-        if (etdev->TxRing.TCBReadyQueueTail) {
-            etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
-        } else {
+        if (etdev->tx_ring.tcb_qtail)
+            etdev->tx_ring.tcb_qtail->next = tcb;
+        else
             /* Apparently ready Q is empty. */
-            etdev->TxRing.TCBReadyQueueHead = pMpTcb;
-        }
+            etdev->tx_ring.tcb_qhead = tcb;
 
-        etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+        etdev->tx_ring.tcb_qtail = tcb;
 
         spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
         return status;
     }
 
-    WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
+    WARN_ON(etdev->tx_ring.used > NUM_TCB);
     return 0;
 }
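The shbufva test above classifies the destination address straight from the first six bytes of skb->data, as three 16-bit words. A standalone sketch of the same tests (illustrative only; assumes a little-endian CPU, where octet 0 of the MAC sits in the low byte of the first word):

#include <stdint.h>
#include <stdbool.h>

/* Broadcast: all 48 destination bits set (ff:ff:ff:ff:ff:ff). */
static bool dest_is_broadcast(const uint16_t *dst)
{
    return dst[0] == 0xffff && dst[1] == 0xffff && dst[2] == 0xffff;
}

/* Multicast: group bit (bit 0 of octet 0) set.  The driver masks two
 * bits (& 0x3), which additionally requires the locally-administered
 * bit to be clear, so only globally-unique multicast addresses match. */
static bool dest_is_multicast(const uint16_t *dst)
{
    return (dst[0] & 0x3) == 0x0001;
}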
 
 /**
  * nic_send_packet - NIC specific send handler for version B silicon.
  * @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
  *
  * Returns 0 or errno.
  */
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 {
-    uint32_t loopIndex;
-    TX_DESC_ENTRY_t CurDesc[24];
-    uint32_t FragmentNumber = 0;
-    uint32_t thiscopy, remainder;
-    struct sk_buff *pPacket = pMpTcb->Packet;
-    uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
-    struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
+    u32 i;
+    struct tx_desc desc[24];	/* 24 x 16 byte */
+    u32 frag = 0;
+    u32 thiscopy, remainder;
+    struct sk_buff *skb = tcb->skb;
+    u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
+    struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
     unsigned long flags;
 
     /* Part of the optimizations of this send routine restrict us to
@@ -500,17 +433,16 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
      * number of fragments. If needed, we can call this function,
      * although it is less efficient.
      */
-    if (FragListCount > 23) {
+    if (nr_frags > 23)
         return -EIO;
-    }
 
-    memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
+    memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
 
-    for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
+    for (i = 0; i < nr_frags; i++) {
         /* If there is something in this element, lets get a
          * descriptor from the ring and get the necessary data
          */
-        if (loopIndex == 0) {
+        if (i == 0) {
             /* If the fragments are smaller than a standard MTU,
              * then map them to a single descriptor in the Tx
              * Desc ring. However, if they're larger, as is
@@ -520,166 +452,164 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
              * This will work until we determine why the hardware
              * doesn't seem to like large fragments.
              */
-            if ((pPacket->len - pPacket->data_len) <= 1514) {
-                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-                CurDesc[FragmentNumber].word2.bits.
-                    length_in_bytes =
-                    pPacket->len - pPacket->data_len;
+            if ((skb->len - skb->data_len) <= 1514) {
+                desc[frag].addr_hi = 0;
+                /* Low 16bits are length, high is vlan and
+                   unused currently so zero */
+                desc[frag].len_vlan =
+                    skb->len - skb->data_len;
 
                 /* NOTE: Here, the dma_addr_t returned from
                  * pci_map_single() is implicitly cast as a
-                 * uint32_t. Although dma_addr_t can be
+                 * u32. Although dma_addr_t can be
                  * 64-bit, the address returned by
                  * pci_map_single() is always 32-bit
                  * addressable (as defined by the pci/dma
                  * subsystem)
                  */
-                CurDesc[FragmentNumber++].DataBufferPtrLow =
+                desc[frag++].addr_lo =
                     pci_map_single(etdev->pdev,
-                                   pPacket->data,
-                                   pPacket->len -
-                                   pPacket->data_len,
+                                   skb->data,
+                                   skb->len -
+                                   skb->data_len,
                                    PCI_DMA_TODEVICE);
             } else {
-                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-                CurDesc[FragmentNumber].word2.bits.
-                    length_in_bytes =
-                    ((pPacket->len - pPacket->data_len) / 2);
+                desc[frag].addr_hi = 0;
+                desc[frag].len_vlan =
+                    (skb->len - skb->data_len) / 2;
 
                 /* NOTE: Here, the dma_addr_t returned from
                  * pci_map_single() is implicitly cast as a
-                 * uint32_t. Although dma_addr_t can be
+                 * u32. Although dma_addr_t can be
                  * 64-bit, the address returned by
                  * pci_map_single() is always 32-bit
                  * addressable (as defined by the pci/dma
                  * subsystem)
                  */
-                CurDesc[FragmentNumber++].DataBufferPtrLow =
+                desc[frag++].addr_lo =
                     pci_map_single(etdev->pdev,
-                                   pPacket->data,
-                                   ((pPacket->len -
-                                     pPacket->data_len) / 2),
+                                   skb->data,
+                                   ((skb->len -
+                                     skb->data_len) / 2),
                                    PCI_DMA_TODEVICE);
-                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+                desc[frag].addr_hi = 0;
 
-                CurDesc[FragmentNumber].word2.bits.
-                    length_in_bytes =
-                    ((pPacket->len - pPacket->data_len) / 2);
+                desc[frag].len_vlan =
+                    (skb->len - skb->data_len) / 2;
 
                 /* NOTE: Here, the dma_addr_t returned from
                  * pci_map_single() is implicitly cast as a
-                 * uint32_t. Although dma_addr_t can be
+                 * u32. Although dma_addr_t can be
                  * 64-bit, the address returned by
                  * pci_map_single() is always 32-bit
                  * addressable (as defined by the pci/dma
                  * subsystem)
                  */
-                CurDesc[FragmentNumber++].DataBufferPtrLow =
+                desc[frag++].addr_lo =
                     pci_map_single(etdev->pdev,
-                                   pPacket->data +
-                                   ((pPacket->len -
-                                     pPacket->data_len) / 2),
-                                   ((pPacket->len -
-                                     pPacket->data_len) / 2),
+                                   skb->data +
+                                   ((skb->len -
+                                     skb->data_len) / 2),
+                                   ((skb->len -
+                                     skb->data_len) / 2),
                                    PCI_DMA_TODEVICE);
             }
         } else {
-            CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-            CurDesc[FragmentNumber].word2.bits.length_in_bytes =
-                pFragList[loopIndex - 1].size;
+            desc[frag].addr_hi = 0;
+            desc[frag].len_vlan =
+                    frags[i - 1].size;
 
             /* NOTE: Here, the dma_addr_t returned from
-             * pci_map_page() is implicitly cast as a uint32_t.
+             * pci_map_page() is implicitly cast as a u32.
              * Although dma_addr_t can be 64-bit, the address
              * returned by pci_map_page() is always 32-bit
             * addressable (as defined by the pci/dma subsystem)
              */
-            CurDesc[FragmentNumber++].DataBufferPtrLow =
+            desc[frag++].addr_lo =
                 pci_map_page(etdev->pdev,
-                             pFragList[loopIndex - 1].page,
-                             pFragList[loopIndex - 1].page_offset,
-                             pFragList[loopIndex - 1].size,
+                             frags[i - 1].page,
+                             frags[i - 1].page_offset,
+                             frags[i - 1].size,
                              PCI_DMA_TODEVICE);
         }
     }
 
-    if (FragmentNumber == 0)
+    if (frag == 0)
         return -EIO;
 
     if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-        if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
-            PARM_TX_NUM_BUFS_DEF) {
-            CurDesc[FragmentNumber - 1].word3.value = 0x5;
-            etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
-        } else {
-            CurDesc[FragmentNumber - 1].word3.value = 0x1;
+        if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
+            /* Last element & Interrupt flag */
+            desc[frag - 1].flags = 0x5;
+            etdev->tx_ring.since_irq = 0;
+        } else { /* Last element */
+            desc[frag - 1].flags = 0x1;
         }
-    } else {
-        CurDesc[FragmentNumber - 1].word3.value = 0x5;
-    }
+    } else
+        desc[frag - 1].flags = 0x5;
 
-    CurDesc[0].word3.bits.f = 1;
+    desc[0].flags |= 2;	/* First element flag */
 
-    pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
-    pMpTcb->PacketStaleCount = 0;
+    tcb->index_start = etdev->tx_ring.send_idx;
+    tcb->stale = 0;
 
     spin_lock_irqsave(&etdev->SendHWLock, flags);
 
     thiscopy = NUM_DESC_PER_RING_TX -
-        INDEX10(etdev->TxRing.txDmaReadyToSend);
+                INDEX10(etdev->tx_ring.send_idx);
 
-    if (thiscopy >= FragmentNumber) {
+    if (thiscopy >= frag) {
         remainder = 0;
-        thiscopy = FragmentNumber;
+        thiscopy = frag;
     } else {
-        remainder = FragmentNumber - thiscopy;
+        remainder = frag - thiscopy;
     }
 
-    memcpy(etdev->TxRing.pTxDescRingVa +
-           INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
-           sizeof(TX_DESC_ENTRY_t) * thiscopy);
+    memcpy(etdev->tx_ring.tx_desc_ring +
+           INDEX10(etdev->tx_ring.send_idx), desc,
+           sizeof(struct tx_desc) * thiscopy);
 
-    add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);
+    add_10bit(&etdev->tx_ring.send_idx, thiscopy);
 
-    if (INDEX10(etdev->TxRing.txDmaReadyToSend)== 0 ||
-        INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
-        etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
-        etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
+    if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
+        INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
+        etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
+        etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
     }
 
     if (remainder) {
-        memcpy(etdev->TxRing.pTxDescRingVa,
-               CurDesc + thiscopy,
-               sizeof(TX_DESC_ENTRY_t) * remainder);
+        memcpy(etdev->tx_ring.tx_desc_ring,
+               desc + thiscopy,
+               sizeof(struct tx_desc) * remainder);
 
-        add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
+        add_10bit(&etdev->tx_ring.send_idx, remainder);
     }
 
-    if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
-        if (etdev->TxRing.txDmaReadyToSend)
-            pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+    if (INDEX10(etdev->tx_ring.send_idx) == 0) {
+        if (etdev->tx_ring.send_idx)
+            tcb->index = NUM_DESC_PER_RING_TX - 1;
         else
-            pMpTcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+            tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
     } else
-        pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;
+        tcb->index = etdev->tx_ring.send_idx - 1;
 
     spin_lock(&etdev->TCBSendQLock);
 
-    if (etdev->TxRing.CurrSendTail)
-        etdev->TxRing.CurrSendTail->Next = pMpTcb;
+    if (etdev->tx_ring.send_tail)
+        etdev->tx_ring.send_tail->next = tcb;
     else
-        etdev->TxRing.CurrSendHead = pMpTcb;
+        etdev->tx_ring.send_head = tcb;
 
-    etdev->TxRing.CurrSendTail = pMpTcb;
+    etdev->tx_ring.send_tail = tcb;
 
-    WARN_ON(pMpTcb->Next != NULL);
+    WARN_ON(tcb->next != NULL);
 
-    etdev->TxRing.nBusySend++;
+    etdev->tx_ring.used++;
 
     spin_unlock(&etdev->TCBSendQLock);
 
     /* Write the new write pointer back to the device. */
-    writel(etdev->TxRing.txDmaReadyToSend,
+    writel(etdev->tx_ring.send_idx,
            &etdev->regs->txdma.service_request);
 
     /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
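send_idx above is not a plain array index: the low 10 bits are the ring offset (extracted by INDEX10()), and the ET_DMA10_WRAP bit toggles each time the offset rolls over, so producer and consumer can tell a full ring from an empty one even when their offsets are equal. A minimal sketch of the arithmetic; the mask and wrap values are assumptions for illustration, not taken from the driver headers:

#include <stdint.h>

#define DMA10_MASK 0x03ff	/* assumed: low 10 bits are the offset */
#define DMA10_WRAP 0x0400	/* assumed: bit 10 is the wrap flag */

static inline uint32_t index10(uint32_t v)
{
    return v & DMA10_MASK;
}

/* Advance only the low 10 bits, preserving the wrap flag; the caller
 * toggles the flag when the offset rolls over, as the code above does. */
static inline void add_10bit(uint32_t *v, int n)
{
    *v = ((*v + n) & DMA10_MASK) | (*v & DMA10_WRAP);
}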
@@ -696,72 +626,71 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 
 /**
- * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
+ * et131x_free_send_packet - Recycle a struct tcb
  * @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
  *
+ * Complete the packet if necessary
  * Assumption - Send spinlock has been acquired
  */
 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-                                    PMP_TCB pMpTcb)
+                                    struct tcb *tcb)
 {
     unsigned long flags;
-    TX_DESC_ENTRY_t *desc = NULL;
+    struct tx_desc *desc = NULL;
     struct net_device_stats *stats = &etdev->net_stats;
 
-    if (pMpTcb->Flags & fMP_DEST_BROAD)
+    if (tcb->flags & fMP_DEST_BROAD)
         atomic_inc(&etdev->Stats.brdcstxmt);
-    else if (pMpTcb->Flags & fMP_DEST_MULTI)
+    else if (tcb->flags & fMP_DEST_MULTI)
         atomic_inc(&etdev->Stats.multixmt);
     else
         atomic_inc(&etdev->Stats.unixmt);
 
-    if (pMpTcb->Packet) {
-        stats->tx_bytes += pMpTcb->Packet->len;
+    if (tcb->skb) {
+        stats->tx_bytes += tcb->skb->len;
 
         /* Iterate through the TX descriptors on the ring
          * corresponding to this packet and umap the fragments
          * they point to
          */
         do {
-            desc =
-                (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
-                                     INDEX10(pMpTcb->WrIndexStart));
+            desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
+                        INDEX10(tcb->index_start));
 
             pci_unmap_single(etdev->pdev,
-                             desc->DataBufferPtrLow,
-                             desc->word2.value, PCI_DMA_TODEVICE);
-
-            add_10bit(&pMpTcb->WrIndexStart, 1);
-            if (INDEX10(pMpTcb->WrIndexStart) >=
-                NUM_DESC_PER_RING_TX) {
-                pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
-                pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
+                             desc->addr_lo,
+                             desc->len_vlan, PCI_DMA_TODEVICE);
+
+            add_10bit(&tcb->index_start, 1);
+            if (INDEX10(tcb->index_start) >=
+                NUM_DESC_PER_RING_TX) {
+                tcb->index_start &= ~ET_DMA10_MASK;
+                tcb->index_start ^= ET_DMA10_WRAP;
             }
-        } while (desc != (etdev->TxRing.pTxDescRingVa +
-                          INDEX10(pMpTcb->WrIndex)));
+        } while (desc != (etdev->tx_ring.tx_desc_ring +
+                INDEX10(tcb->index)));
 
-        dev_kfree_skb_any(pMpTcb->Packet);
+        dev_kfree_skb_any(tcb->skb);
     }
 
-    memset(pMpTcb, 0, sizeof(MP_TCB));
+    memset(tcb, 0, sizeof(struct tcb));
 
     /* Add the TCB to the Ready Q */
     spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
     etdev->Stats.opackets++;
 
-    if (etdev->TxRing.TCBReadyQueueTail) {
-        etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
-    } else {
+    if (etdev->tx_ring.tcb_qtail)
+        etdev->tx_ring.tcb_qtail->next = tcb;
+    else
         /* Apparently ready Q is empty. */
-        etdev->TxRing.TCBReadyQueueHead = pMpTcb;
-    }
+        etdev->tx_ring.tcb_qhead = tcb;
 
-    etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+    etdev->tx_ring.tcb_qtail = tcb;
 
     spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-    WARN_ON(etdev->TxRing.nBusySend < 0);
+    WARN_ON(etdev->tx_ring.used < 0);
 }
 
 /**
@@ -772,52 +701,40 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
  */
 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 {
-    PMP_TCB pMpTcb;
-    struct list_head *entry;
+    struct tcb *tcb;
     unsigned long flags;
-    uint32_t FreeCounter = 0;
-
-    while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
-        spin_lock_irqsave(&etdev->SendWaitLock, flags);
-
-        etdev->TxRing.nWaitSend--;
-        spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
-
-        entry = etdev->TxRing.SendWaitQueue.next;
-    }
-
-    etdev->TxRing.nWaitSend = 0;
+    u32 freed = 0;
 
     /* Any packets being sent? Check the first TCB on the send list */
     spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-    pMpTcb = etdev->TxRing.CurrSendHead;
+    tcb = etdev->tx_ring.send_head;
 
-    while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
-        PMP_TCB pNext = pMpTcb->Next;
+    while (tcb != NULL && freed < NUM_TCB) {
+        struct tcb *next = tcb->next;
 
-        etdev->TxRing.CurrSendHead = pNext;
+        etdev->tx_ring.send_head = next;
 
-        if (pNext == NULL)
-            etdev->TxRing.CurrSendTail = NULL;
+        if (next == NULL)
+            etdev->tx_ring.send_tail = NULL;
 
-        etdev->TxRing.nBusySend--;
+        etdev->tx_ring.used--;
 
         spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
-        FreeCounter++;
-        et131x_free_send_packet(etdev, pMpTcb);
+        freed++;
+        et131x_free_send_packet(etdev, tcb);
 
         spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-        pMpTcb = etdev->TxRing.CurrSendHead;
+        tcb = etdev->tx_ring.send_head;
     }
 
-    WARN_ON(FreeCounter == NUM_TCB);
+    WARN_ON(freed == NUM_TCB);
 
     spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
-    etdev->TxRing.nBusySend = 0;
+    etdev->tx_ring.used = 0;
 }
 
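Note the locking pattern shared by et131x_free_busy_send_packets() above and the interrupt handler below: the send-queue lock is dropped around each et131x_free_send_packet() call (which takes the ready-queue lock itself), and the list head is re-read after the lock is re-acquired rather than trusting a cached pointer. A minimal sketch of that traversal, with hypothetical names:

#include <linux/spinlock.h>

struct item {
    struct item *next;
};

struct item_list {
    spinlock_t lock;
    struct item *head;
};

/* Pop and complete each item, dropping the lock around the callback
 * because it may take other locks; re-read the head each iteration. */
static void drain_list(struct item_list *l, void (*complete)(struct item *))
{
    unsigned long flags;
    struct item *it;

    spin_lock_irqsave(&l->lock, flags);
    while ((it = l->head) != NULL) {
        l->head = it->next;
        spin_unlock_irqrestore(&l->lock, flags);
        complete(it);
        spin_lock_irqsave(&l->lock, flags);
    }
    spin_unlock_irqrestore(&l->lock, flags);
}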
 /**
@@ -831,99 +748,56 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
  */
 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 {
-    /* Mark as completed any packets which have been sent by the device. */
-    et131x_update_tcb_list(etdev);
-
-    /* If we queued any transmits because we didn't have any TCBs earlier,
-     * dequeue and send those packets now, as long as we have free TCBs.
-     */
-    et131x_check_send_wait_list(etdev);
-}
-
-/**
- * et131x_update_tcb_list - Helper routine for Send Interrupt handler
- * @etdev: pointer to our adapter
- *
- * Re-claims the send resources and completes sends. Can also be called as
- * part of the NIC send routine when the "ServiceComplete" indication has
- * wrapped.
- */
-static void et131x_update_tcb_list(struct et131x_adapter *etdev)
-{
     unsigned long flags;
-    u32 ServiceComplete;
-    PMP_TCB pMpTcb;
+    u32 serviced;
+    struct tcb *tcb;
     u32 index;
 
-    ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
-    index = INDEX10(ServiceComplete);
+    serviced = readl(&etdev->regs->txdma.NewServiceComplete);
+    index = INDEX10(serviced);
 
     /* Has the ring wrapped? Process any descriptors that do not have
      * the same "wrap" indicator as the current completion indicator
      */
     spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-    pMpTcb = etdev->TxRing.CurrSendHead;
+    tcb = etdev->tx_ring.send_head;
 
-    while (pMpTcb &&
-           ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
-           index < INDEX10(pMpTcb->WrIndex)) {
-        etdev->TxRing.nBusySend--;
-        etdev->TxRing.CurrSendHead = pMpTcb->Next;
-        if (pMpTcb->Next == NULL)
-            etdev->TxRing.CurrSendTail = NULL;
+    while (tcb &&
+           ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
+           index < INDEX10(tcb->index)) {
+        etdev->tx_ring.used--;
+        etdev->tx_ring.send_head = tcb->next;
+        if (tcb->next == NULL)
+            etdev->tx_ring.send_tail = NULL;
 
         spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-        et131x_free_send_packet(etdev, pMpTcb);
+        et131x_free_send_packet(etdev, tcb);
         spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
         /* Goto the next packet */
-        pMpTcb = etdev->TxRing.CurrSendHead;
+        tcb = etdev->tx_ring.send_head;
     }
-    while (pMpTcb &&
-           !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
-           && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
-        etdev->TxRing.nBusySend--;
-        etdev->TxRing.CurrSendHead = pMpTcb->Next;
-        if (pMpTcb->Next == NULL)
-            etdev->TxRing.CurrSendTail = NULL;
+    while (tcb &&
+           !((serviced ^ tcb->index) & ET_DMA10_WRAP)
+           && index > (tcb->index & ET_DMA10_MASK)) {
+        etdev->tx_ring.used--;
+        etdev->tx_ring.send_head = tcb->next;
+        if (tcb->next == NULL)
+            etdev->tx_ring.send_tail = NULL;
 
         spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-        et131x_free_send_packet(etdev, pMpTcb);
+        et131x_free_send_packet(etdev, tcb);
         spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
         /* Goto the next packet */
-        pMpTcb = etdev->TxRing.CurrSendHead;
+        tcb = etdev->tx_ring.send_head;
     }
 
     /* Wake up the queue when we hit a low-water mark */
-    if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
+    if (etdev->tx_ring.used <= NUM_TCB / 3)
         netif_wake_queue(etdev->netdev);
 
     spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 }
-
-/**
- * et131x_check_send_wait_list - Helper routine for the interrupt handler
- * @etdev: pointer to our adapter
- *
- * Takes packets from the send wait queue and posts them to the device (if
- * room available).
- */
-static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&etdev->SendWaitLock, flags);
-
-    while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
-           MP_TCB_RESOURCES_AVAILABLE(etdev)) {
-        struct list_head *entry;
-
-        entry = etdev->TxRing.SendWaitQueue.next;
-
-        etdev->TxRing.nWaitSend--;
-    }
-
-    spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
-}
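The two while loops in et131x_handle_send_interrupt() apply one ordering test in two cases: when the wrap bits differ, the completion offset has restarted from zero, so a TCB whose offset is still larger belongs to the previous pass and is already consumed; when the wrap bits match, the completion offset must simply have moved past the TCB's offset. Folded into a single predicate (illustrative sketch with assumed mask/wrap values, not driver code):

#include <stdint.h>
#include <stdbool.h>

#define DMA10_MASK 0x03ff	/* assumed: low 10 bits are the offset */
#define DMA10_WRAP 0x0400	/* assumed: bit 10 is the wrap flag */

/* 'serviced' is the hardware completion position, 'index' the last
 * descriptor of a posted packet, both in offset-plus-wrap format. */
static bool tcb_complete(uint32_t serviced, uint32_t index)
{
    if ((serviced ^ index) & DMA10_WRAP)
        /* different generations: completion restarted from zero */
        return (serviced & DMA10_MASK) < (index & DMA10_MASK);
    return (serviced & DMA10_MASK) > (index & DMA10_MASK);
}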