Diffstat (limited to 'drivers/net/ethernet/amd')
-rw-r--r--  drivers/net/ethernet/amd/Kconfig              |  13
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c           | 363
-rw-r--r--  drivers/net/ethernet/amd/declance.c           |  12
-rw-r--r--  drivers/net/ethernet/amd/xgbe/Makefile        |   4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h   | 121
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c      | 270
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c  |   7
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c     |  32
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c      | 780
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c      | 728
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c  |  74
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c     |  66
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c     | 107
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c      | 285
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h          | 151
15 files changed, 2322 insertions, 691 deletions
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index bbaf36d9f5e1..8319c99331b0 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -182,6 +182,9 @@ config AMD_XGBE
depends on OF_NET
select PHYLIB
select AMD_XGBE_PHY
+ select BITREVERSE
+ select CRC32
+ select PTP_1588_CLOCK
---help---
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
@@ -189,4 +192,14 @@ config AMD_XGBE
To compile this driver as a module, choose M here: the module
will be called amd-xgbe.
+config AMD_XGBE_DCB
+ bool "Data Center Bridging (DCB) support"
+ default n
+ depends on AMD_XGBE && DCB
+ ---help---
+ Say Y here to enable Data Center Bridging (DCB) support in the
+ driver.
+
+ If unsure, say N.
+
endif # NET_VENDOR_AMD
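
For context: the new AMD_XGBE_DCB symbol only gates whether xgbe-dcb.c is
compiled in (see the xgbe Makefile hunk further down). A sketch of the
header-side declaration this implies — assumed here, since the xgbe.h hunk
is not part of this range:

    #ifdef CONFIG_AMD_XGBE_DCB
    /* Implemented in xgbe-dcb.c, added by this patch. */
    const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
    #endif
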
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 068dc7cad5fa..841e6558db68 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -101,7 +101,6 @@ Revision History:
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
@@ -109,17 +108,9 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
-
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-
-};
-/*
-This function will read the PHY registers.
-*/
-static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
+/* This function will read the PHY registers. */
+static int amd8111e_read_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 *val)
{
void __iomem *mmio = lp->mmio;
unsigned int reg_val;
@@ -146,10 +137,9 @@ err_phy_read:
}
-/*
-This function will write into PHY registers.
-*/
-static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
+/* This function will write into PHY registers. */
+static int amd8111e_write_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 val)
{
unsigned int repeat = REPEAT_CNT;
void __iomem *mmio = lp->mmio;
@@ -176,12 +166,11 @@ err_phy_write:
return -EINVAL;
}
-/*
-This is the mii register read function provided to the mii interface.
-*/
-static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
+
+/* This is the mii register read function provided to the mii interface. */
+static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
- struct amd8111e_priv* lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int reg_val;
amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
@@ -189,19 +178,18 @@ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
}
-/*
-This is the mii register write function provided to the mii interface.
-*/
-static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
+/* This is the mii register write function provided to the mii interface. */
+static void amd8111e_mdio_write(struct net_device *dev,
+ int phy_id, int reg_num, int val)
{
- struct amd8111e_priv* lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
amd8111e_write_phy(lp, phy_id, reg_num, val);
}
-/*
-This function will set PHY speed. During initialization sets the original speed to 100 full.
-*/
+/* This function will set PHY speed. During initialization it sets
+ * the original speed to 100 full.
+ */
static void amd8111e_set_ext_phy(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
@@ -240,14 +228,13 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
}
-/*
-This function will unmap skb->data space and will free
-all transmit and receive skbuffs.
-*/
+/* This function will unmap skb->data space and will free
+ * all transmit and receive skbuffs.
+ */
static int amd8111e_free_skbs(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- struct sk_buff* rx_skbuff;
+ struct sk_buff *rx_skbuff;
int i;
/* Freeing transmit skbs */
@@ -274,18 +261,18 @@ static int amd8111e_free_skbs(struct net_device *dev)
return 0;
}
-/*
-This will set the receive buffer length corresponding to the mtu size of networkinterface.
-*/
-static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
+/* This will set the receive buffer length corresponding
+ * to the mtu size of the network interface.
+ */
+static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
- struct amd8111e_priv* lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int mtu = dev->mtu;
if (mtu > ETH_DATA_LEN){
/* MTU + ethernet header + FCS
- + optional VLAN tag + skb reserve space 2 */
-
+ * + optional VLAN tag + skb reserve space 2
+ */
lp->rx_buff_len = mtu + ETH_HLEN + 10;
lp->options |= OPTION_JUMBO_ENABLE;
} else{
@@ -294,8 +281,10 @@ static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
}
}
-/*
-This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
+/* This function will free all the previously allocated buffers,
+ * determine new receive buffer length and will allocate new receive buffers.
+ * This function also allocates and initializes both the transmit
+ * and receive hardware descriptors.
*/
static int amd8111e_init_ring(struct net_device *dev)
{
@@ -376,15 +365,18 @@ err_free_tx_ring:
err_no_mem:
return -ENOMEM;
}
-/* This function will set the interrupt coalescing according to the input arguments */
-static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
+
+/* This function will set the interrupt coalescing according
+ * to the input arguments
+ */
+static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
{
unsigned int timeout;
unsigned int event_count;
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
- struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
switch(cmod)
@@ -435,9 +427,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
}
-/*
-This function initializes the device registers and starts the device.
-*/
+/* This function initializes the device registers and starts the device. */
static int amd8111e_restart(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
@@ -501,8 +491,7 @@ static int amd8111e_restart(struct net_device *dev)
/* Enable interrupt coalesce */
if(lp->options & OPTION_INTR_COAL_ENABLE){
- printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
- dev->name);
+ netdev_info(dev, "Interrupt Coalescing Enabled.\n");
amd8111e_set_coalesce(dev,ENABLE_COAL);
}
@@ -514,10 +503,9 @@ static int amd8111e_restart(struct net_device *dev)
readl(mmio+CMD0);
return 0;
}
-/*
-This function clears necessary the device registers.
-*/
-static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+
+/* This function clears the necessary device registers. */
+static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
unsigned int reg_val;
unsigned int logic_filter[2] ={0,};
@@ -587,7 +575,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
writew(MIB_CLEAR, mmio + MIB_ADDR);
/* Clear LARF */
- amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+ amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
/* SRAM_SIZE register */
reg_val = readl(mmio + SRAM_SIZE);
@@ -605,11 +593,10 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
}
-/*
-This function disables the interrupt and clears all the pending
-interrupts in INT0
+/* This function disables the interrupt and clears all the pending
+ * interrupts in INT0
*/
-static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
+static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
u32 intr0;
@@ -625,10 +612,8 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
}
-/*
-This function stops the chip.
-*/
-static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+/* This function stops the chip. */
+static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
writel(RUN, lp->mmio + CMD0);
@@ -636,10 +621,8 @@ static void amd8111e_stop_chip(struct amd8111e_priv* lp)
readl(lp->mmio + CMD0);
}
-/*
-This function frees the transmiter and receiver descriptor rings.
-*/
-static void amd8111e_free_ring(struct amd8111e_priv* lp)
+/* This function frees the transmitter and receiver descriptor rings. */
+static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
/* Free transmit and receive descriptor rings */
if(lp->rx_ring){
@@ -659,12 +642,13 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp)
}
-/*
-This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
-*/
+/* This function will free all the transmit skbs that are actually
+ * transmitted by the device. It will check the ownership of the
+ * skb before freeing the skb.
+ */
static int amd8111e_tx(struct net_device *dev)
{
- struct amd8111e_priv* lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
int status;
/* Complete all the transmit packet */
@@ -724,21 +708,20 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
goto rx_not_empty;
do{
- /* process receive packets until we use the quota*/
- /* If we own the next entry, it's a new packet. Send it up. */
+ /* process receive packets until we use the quota.
+ * If we own the next entry, it's a new packet. Send it up.
+ */
while(1) {
status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
if (status & OWN_BIT)
break;
- /*
- * There is a tricky error noted by John Murphy,
+ /* There is a tricky error noted by John Murphy,
* <murf@perftech.com> to Russ Nelson: Even with
 * full-sized buffers it's possible for a
* jabber packet to use two buffers, with only
* the last correctly noting the error.
*/
-
if(status & ERR_BIT) {
 /* resetting flags */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
@@ -771,7 +754,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
if (!new_skb) {
/* if allocation fail,
- ignore that pkt and go to next one */
+ * ignore that pkt and go to next one
+ */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
lp->drv_rx_errors++;
goto err_next_pkt;
@@ -812,8 +796,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
}
/* Check the interrupt status register for more packets in the
- mean time. Process them since we have not used up our quota.*/
-
+ * mean time. Process them since we have not used up our quota.
+ */
intr0 = readl(mmio + INT0);
/*Ack receive packets */
writel(intr0 & RINT0,mmio + INT0);
@@ -833,10 +817,8 @@ rx_not_empty:
return num_rx_pkt;
}
-/*
-This function will indicate the link status to the kernel.
-*/
-static int amd8111e_link_change(struct net_device* dev)
+/* This function will indicate the link status to the kernel. */
+static int amd8111e_link_change(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int status0,speed;
@@ -860,24 +842,26 @@ static int amd8111e_link_change(struct net_device* dev)
else if(speed == PHY_SPEED_100)
lp->link_config.speed = SPEED_100;
- printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
- (lp->link_config.speed == SPEED_100) ? "100": "10",
- (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
+ netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
+ (lp->link_config.speed == SPEED_100) ?
+ "100" : "10",
+ (lp->link_config.duplex == DUPLEX_FULL) ?
+ "Full" : "Half");
+
netif_carrier_on(dev);
}
else{
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_INVALID;
- printk(KERN_INFO "%s: Link is Down.\n",dev->name);
+ netdev_info(dev, "Link is Down.\n");
netif_carrier_off(dev);
}
return 0;
}
-/*
-This function reads the mib counters.
-*/
+
+/* This function reads the mib counters. */
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
unsigned int status;
@@ -895,8 +879,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
return data;
}
-/*
- * This function reads the mib registers and returns the hardware statistics.
+/* This function reads the mib registers and returns the hardware statistics.
* It updates previous internal driver statistics with new values.
*/
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
@@ -992,13 +975,14 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
return new_stats;
}
+
 /* This function recalculates the interrupt coalescing mode on every interrupt
-according to the datarate and the packet rate.
-*/
+ * according to the data rate and the packet rate.
+ */
static int amd8111e_calc_coalesce(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
int tx_pkt_rate;
int rx_pkt_rate;
int tx_data_rate;
@@ -1126,13 +1110,14 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
return 0;
}
-/*
-This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
-*/
+
+/* This is the device interrupt function. It handles transmit,
+ * receive, link change and hardware timer interrupts.
+ */
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{
- struct net_device * dev = (struct net_device *) dev_id;
+ struct net_device *dev = (struct net_device *)dev_id;
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
unsigned int intr0, intren0;
@@ -1168,7 +1153,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
- printk("************Driver bug! interrupt while in poll\n");
+ netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
@@ -1205,10 +1190,11 @@ static void amd8111e_poll(struct net_device *dev)
#endif
-/*
-This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
-*/
-static int amd8111e_close(struct net_device * dev)
+/* This function closes the network interface and updates
+ * the statistics so that most recent statistics will be
+ * available after the interface is down.
+ */
+static int amd8111e_close(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
netif_stop_queue(dev);
@@ -1238,9 +1224,11 @@ static int amd8111e_close(struct net_device * dev)
lp->opened = 0;
return 0;
}
-/* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device.
-*/
-static int amd8111e_open(struct net_device * dev )
+
+/* This function opens a new interface. It requests the irq for the device,
+ * initializes the device, buffers and descriptors, and starts the device.
+ */
+static int amd8111e_open(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1264,7 +1252,7 @@ static int amd8111e_open(struct net_device * dev )
/* Start ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){
add_timer(&lp->ipg_data.ipg_timer);
- printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+ netdev_info(dev, "Dynamic IPG Enabled\n");
}
lp->opened = 1;
@@ -1275,10 +1263,11 @@ static int amd8111e_open(struct net_device * dev )
return 0;
}
-/*
-This function checks if there is any transmit descriptors available to queue more packet.
-*/
-static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+
+/* This function checks if there are any transmit descriptors
+ * available to queue more packets.
+ */
+static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
{
int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
if (lp->tx_skbuff[tx_index])
@@ -1287,12 +1276,14 @@ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
return 0;
}
-/*
-This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
-*/
+/* This function will queue the transmit packets to the
+ * descriptors and will trigger the send operation. It also
+ * initializes the transmit descriptors with buffer physical address,
+ * byte count, ownership to hardware etc.
+ */
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
- struct net_device * dev)
+ struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int tx_index;
@@ -1338,9 +1329,7 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
-/*
-This function returns all the memory mapped registers of the device.
-*/
+/* This function returns all the memory mapped registers of the device. */
static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
void __iomem *mmio = lp->mmio;
@@ -1361,10 +1350,9 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
}
-/*
-This function sets promiscuos mode, all-multi mode or the multicast address
-list to the device.
-*/
+/* This function sets promiscuous mode, all-multi mode or the multicast address
+ * list to the device.
+ */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1383,14 +1371,14 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->options |= OPTION_MULTICAST_ENABLE;
- amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
return;
}
if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
lp->options &= ~OPTION_MULTICAST_ENABLE;
- amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
/* disable promiscuous mode */
writel(PROM, lp->mmio + CMD2);
return;
@@ -1402,14 +1390,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
- amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD2);
}
-static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
+static void amd8111e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
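
As a worked example of the logical address filter setup above: ether_crc_le()
computes a little-endian CRC-32 over the 6-byte MAC address, bits 31..26 of the
result select one of the 64 LADRF bits, and that bit number is split into a
word index plus a bit offset. The helper below is illustrative, not part of
the patch:

    /* Illustrative only: mirrors the hash placement in the hunk above. */
    static void ladrf_set_bit(u32 mc_filter[2], const u8 *addr)
    {
            int bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;

            /* bit_num >> 5 picks mc_filter[0] or mc_filter[1]; together
             * the two 32-bit words form the 64-bit LADRF register.
             */
            mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
    }
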
@@ -1501,11 +1490,11 @@ static const struct ethtool_ops ops = {
.set_wol = amd8111e_set_wol,
};
-/*
-This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
-*/
-
-static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+/* This function handles all the ethtool ioctls. It gives driver info,
+ * gets/sets driver speed, gets memory mapped register values, forces
+ * auto negotiation, sets/gets WOL options for ethtool application.
+ */
+static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1559,9 +1548,9 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-/*
-This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers.
-*/
+/* This function changes the mtu of the device. It restarts the device to
+ * initialize the descriptor with new receive buffers.
+ */
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1572,7 +1561,8 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev)) {
/* new_mtu will be used
- when device starts netxt time */
+ * when the device starts next time
+ */
dev->mtu = new_mtu;
return 0;
}
@@ -1591,7 +1581,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
return err;
}
-static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
writel( VAL1|MPPLBA, lp->mmio + CMD3);
writel( VAL0|MPEN_SW, lp->mmio + CMD7);
@@ -1601,7 +1591,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
return 0;
}
-static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
 /* Adapter is already stopped/suspended/interrupt-disabled */
@@ -1612,19 +1602,18 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
return 0;
}
-/*
- * This function is called when a packet transmission fails to complete
+/* This function is called when a packet transmission fails to complete
 * within a reasonable period, on the assumption that an interrupt has
* failed or the interface is locked up. This function will reinitialize
* the hardware.
*/
static void amd8111e_tx_timeout(struct net_device *dev)
{
- struct amd8111e_priv* lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
int err;
- printk(KERN_ERR "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
+
spin_lock_irq(&lp->lock);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
@@ -1701,22 +1690,10 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
return 0;
}
-
-static void amd8111e_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- if (dev) {
- unregister_netdev(dev);
- iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
- free_netdev(dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- }
-}
-static void amd8111e_config_ipg(struct net_device* dev)
+static void amd8111e_config_ipg(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- struct ipg_info* ipg_data = &lp->ipg_data;
+ struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
unsigned int total_col_cnt;
@@ -1814,27 +1791,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
{
int err, i;
unsigned long reg_addr,reg_len;
- struct amd8111e_priv* lp;
- struct net_device* dev;
+ struct amd8111e_priv *lp;
+ struct net_device *dev;
err = pci_enable_device(pdev);
if(err){
- printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
- "exiting.\n");
+ dev_err(&pdev->dev, "Cannot enable new PCI device\n");
return err;
}
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
- printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
- "exiting.\n");
+ dev_err(&pdev->dev, "Cannot find PCI base address\n");
err = -ENODEV;
goto err_disable_pdev;
}
err = pci_request_regions(pdev, MODULE_NAME);
if(err){
- printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
- "exiting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
@@ -1842,16 +1816,14 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Find power-management capability. */
if (!pdev->pm_cap) {
- printk(KERN_ERR "amd8111e: No Power Management capability, "
- "exiting.\n");
+ dev_err(&pdev->dev, "No Power Management capability\n");
err = -ENODEV;
goto err_free_reg;
}
/* Initialize DMA */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
- printk(KERN_ERR "amd8111e: DMA not supported,"
- "exiting.\n");
+ dev_err(&pdev->dev, "DMA not supported\n");
err = -ENODEV;
goto err_free_reg;
}
@@ -1878,10 +1850,9 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
spin_lock_init(&lp->lock);
- lp->mmio = ioremap(reg_addr, reg_len);
+ lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
if (!lp->mmio) {
- printk(KERN_ERR "amd8111e: Cannot map device registers, "
- "exiting\n");
+ dev_err(&pdev->dev, "Cannot map device registers\n");
err = -ENOMEM;
goto err_free_dev;
}
@@ -1923,9 +1894,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR "amd8111e: Cannot register net device, "
- "exiting.\n");
- goto err_iounmap;
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto err_free_dev;
}
pci_set_drvdata(pdev, dev);
@@ -1942,21 +1912,17 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
}
/* display driver and device information */
-
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
- printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
- dev->name,MODULE_VERS);
- printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
- dev->name, chip_version, dev->dev_addr);
+ dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
+ dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+ chip_version, dev->dev_addr);
if (lp->ext_phy_id)
- printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
- dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+ dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
+ lp->ext_phy_id, lp->ext_phy_addr);
else
- printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
- dev->name);
+ dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
+
return 0;
-err_iounmap:
- iounmap(lp->mmio);
err_free_dev:
free_netdev(dev);
@@ -1970,6 +1936,29 @@ err_disable_pdev:
}
+static void amd8111e_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static const struct pci_device_id amd8111e_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD8111E_7462,
+ },
+ {
+ .vendor = 0,
+ }
+};
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+
static struct pci_driver amd8111e_driver = {
.name = MODULE_NAME,
.id_table = amd8111e_pci_tbl,
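
A note on the rewritten table: with designated initializers, every field not
named is zero-initialized, so the terminating entry needs nothing beyond
.vendor = 0. For reference, these are the match fields struct pci_device_id
carries (per <linux/mod_devicetable.h>; comments are illustrative):

    struct pci_device_id {
            __u32 vendor, device;       /* vendor/device id or PCI_ANY_ID */
            __u32 subvendor, subdevice; /* subsystem ids or PCI_ANY_ID */
            __u32 class, class_mask;    /* class/subclass/prog-if triplet */
            kernel_ulong_t driver_data; /* data private to the driver */
    };
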
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 57397295887c..b584b78237df 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -475,7 +475,7 @@ static void lance_init_ring(struct net_device *dev)
*lib_ptr(ib, rx_ptr, lp->type) = leptr;
if (ZERO)
printk("RX ptr: %8.8x(%8.8x)\n",
- leptr, lib_off(brx_ring, lp->type));
+ leptr, (uint)lib_off(brx_ring, lp->type));
/* Setup tx descriptor pointer */
leptr = offsetof(struct lance_init_block, btx_ring);
@@ -484,7 +484,7 @@ static void lance_init_ring(struct net_device *dev)
*lib_ptr(ib, tx_ptr, lp->type) = leptr;
if (ZERO)
printk("TX ptr: %8.8x(%8.8x)\n",
- leptr, lib_off(btx_ring, lp->type));
+ leptr, (uint)lib_off(btx_ring, lp->type));
if (ZERO)
printk("TX rings:\n");
@@ -499,8 +499,8 @@ static void lance_init_ring(struct net_device *dev)
/* The ones required by tmd2 */
*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
if (i < 3 && ZERO)
- printk("%d: 0x%8.8x(0x%8.8x)\n",
- i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->tx_buf_ptr_cpu[i]);
}
/* Setup the Rx ring entries */
@@ -516,8 +516,8 @@ static void lance_init_ring(struct net_device *dev)
0xf000;
*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
if (i < 3 && ZERO)
- printk("%d: 0x%8.8x(0x%8.8x)\n",
- i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->rx_buf_ptr_cpu[i]);
}
iob();
}
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 26cf9af1642f..171a7e68048d 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
- xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+ xgbe-ptp.o
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index bf462ee86f5c..cc25a3a9e7cf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -170,6 +170,8 @@
#define DMA_MR_SWR_WIDTH 1
#define DMA_SBMR_EAME_INDEX 11
#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
@@ -276,13 +278,6 @@
#define MAC_PFR 0x0008
#define MAC_WTR 0x000c
#define MAC_HTR0 0x0010
-#define MAC_HTR1 0x0014
-#define MAC_HTR2 0x0018
-#define MAC_HTR3 0x001c
-#define MAC_HTR4 0x0020
-#define MAC_HTR5 0x0024
-#define MAC_HTR6 0x0028
-#define MAC_HTR7 0x002c
#define MAC_VLANTR 0x0050
#define MAC_VLANHTR 0x0058
#define MAC_VLANIR 0x0060
@@ -312,9 +307,23 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
@@ -355,6 +364,8 @@
#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
#define MAC_HWF1R_L3L4FNUM_INDEX 27
#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
#define MAC_HWF1R_RSSEN_INDEX 20
#define MAC_HWF1R_RSSEN_WIDTH 1
#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
@@ -377,22 +388,30 @@
#define MAC_HWF2R_TXCHCNT_WIDTH 4
#define MAC_HWF2R_TXQCNT_INDEX 6
#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
#define MAC_ISR_MMCRXIS_INDEX 9
#define MAC_ISR_MMCRXIS_WIDTH 1
#define MAC_ISR_MMCTXIS_INDEX 10
#define MAC_ISR_MMCTXIS_WIDTH 1
#define MAC_ISR_PMTIS_INDEX 4
#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
#define MAC_MACA1HR_AE_INDEX 31
#define MAC_MACA1HR_AE_WIDTH 1
#define MAC_PFR_HMC_INDEX 2
#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
#define MAC_PFR_HUC_INDEX 1
#define MAC_PFR_HUC_WIDTH 1
#define MAC_PFR_PM_INDEX 4
#define MAC_PFR_PM_WIDTH 1
#define MAC_PFR_PR_INDEX 0
#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
#define MAC_PMTCSR_MGKPKTEN_INDEX 1
#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
#define MAC_PMTCSR_PWRDWN_INDEX 0
@@ -419,24 +438,80 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
#define MAC_TCR_SS_INDEX 29
#define MAC_TCR_SS_WIDTH 2
#define MAC_TCR_TE_INDEX 0
#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
#define MAC_VLANTR_DOVLTC_INDEX 20
#define MAC_VLANTR_DOVLTC_WIDTH 1
#define MAC_VLANTR_ERSVLM_INDEX 19
#define MAC_VLANTR_ERSVLM_WIDTH 1
#define MAC_VLANTR_ESVL_INDEX 18
#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
#define MAC_VLANTR_EVLS_INDEX 21
#define MAC_VLANTR_EVLS_WIDTH 2
#define MAC_VLANTR_EVLRXS_INDEX 24
#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
#define MAC_VR_DEVID_INDEX 8
#define MAC_VR_DEVID_WIDTH 8
#define MAC_VR_SNPSVER_INDEX 0
@@ -638,6 +713,8 @@
#define MTL_RQDCM_INC 4
#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
/* MTL register entry bit positions and sizes */
#define MTL_OMR_ETSALG_INDEX 5
@@ -656,9 +733,6 @@
#define MTL_Q_TQOMR 0x00
#define MTL_Q_TQUR 0x04
#define MTL_Q_TQDR 0x08
-#define MTL_Q_TCECR 0x10
-#define MTL_Q_TCESR 0x14
-#define MTL_Q_TCQWR 0x18
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x4c
@@ -666,8 +740,6 @@
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
-#define MTL_Q_TCQWR_QW_INDEX 0
-#define MTL_Q_TCQWR_QW_WIDTH 21
#define MTL_Q_RQOMR_EHFC_INDEX 7
#define MTL_Q_RQOMR_EHFC_WIDTH 1
#define MTL_Q_RQOMR_RFA_INDEX 8
@@ -682,6 +754,8 @@
#define MTL_Q_RQOMR_RTC_WIDTH 2
#define MTL_Q_TQOMR_FTQ_INDEX 0
#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
#define MTL_Q_TQOMR_TQS_INDEX 16
#define MTL_Q_TQOMR_TQS_WIDTH 10
#define MTL_Q_TQOMR_TSF_INDEX 1
@@ -728,10 +802,14 @@
#define MTL_TC_INC MTL_Q_INC
#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
/* MTL traffic class register entry bit positions and sizes */
#define MTL_TC_ETSCR_TSA_INDEX 0
#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
/* MTL traffic class register value */
#define MTL_TSA_SP 0x00
@@ -764,9 +842,19 @@
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
#define RX_NORMAL_DESC3_ES_INDEX 15
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
@@ -780,12 +868,19 @@
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
#define TX_CONTEXT_DESC2_MSS_INDEX 0
#define TX_CONTEXT_DESC2_MSS_WIDTH 15
@@ -802,6 +897,8 @@
#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
#define TX_NORMAL_DESC2_IC_INDEX 31
#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
#define TX_NORMAL_DESC2_VTIR_INDEX 14
#define TX_NORMAL_DESC2_VTIR_WIDTH 2
#define TX_NORMAL_DESC3_CIC_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 000000000000..7d6a49b24321
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -0,0 +1,270 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported traffic classes */
+ ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->ets) {
+ ets->cbs = pdata->ets->cbs;
+ memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, pdata->ets->prio_tc,
+ sizeof(ets->prio_tc));
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i, tc_ets, tc_ets_weight;
+
+ tc_ets = 0;
+ tc_ets_weight = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
+ DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
+ (i >= pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_ets = 1;
+ tc_ets_weight += ets->tc_tx_bw[i];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Weights must add up to 100% */
+ if (tc_ets && (tc_ets_weight != 100))
+ return -EINVAL;
+
+ if (!pdata->ets) {
+ pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+ GFP_KERNEL);
+ if (!pdata->ets)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+ pdata->hw_if.config_dcb_tc(pdata);
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported PFC traffic classes */
+ pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->pfc) {
+ pfc->pfc_en = pdata->pfc->pfc_en;
+ pfc->mbc = pdata->pfc->mbc;
+ pfc->delay = pdata->pfc->delay;
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+ if (!pdata->pfc) {
+ pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+ GFP_KERNEL);
+ if (!pdata->pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+ pdata->hw_if.config_dcb_pfc(pdata);
+
+ return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+ u8 support = xgbe_dcb_getdcbx(netdev);
+
+ DBGPR(" DCBX=%#hhx\n", dcbx);
+
+ if (dcbx & ~support)
+ return 1;
+
+ if ((dcbx & support) != support)
+ return 1;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = xgbe_dcb_ieee_getets,
+ .ieee_setets = xgbe_dcb_ieee_setets,
+ .ieee_getpfc = xgbe_dcb_ieee_getpfc,
+ .ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+ /* DCBX configuration */
+ .getdcbx = xgbe_dcb_getdcbx,
+ .setdcbx = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
+{
+ return &xgbe_dcbnl_ops;
+}
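
The setets handler above accepts a configuration only if every referenced TC
is within hw_feat.tc_cnt and the ETS weights sum to exactly 100. A hypothetical
ieee_ets that passes validation on hardware reporting two traffic classes
(values are illustrative, not from the patch):

    struct ieee_ets ets = { 0 };    /* zeroed TCs default to TSA_STRICT */

    ets.tc_tsa[0]   = IEEE_8021QAZ_TSA_STRICT;
    ets.tc_tsa[1]   = IEEE_8021QAZ_TSA_ETS;
    ets.tc_tx_bw[1] = 100;          /* lone ETS class must carry 100% */
    ets.prio_tc[0]  = 0;            /* priority 0 -> TC0 */
    ets.prio_tc[7]  = 1;            /* priority 7 -> TC1 */
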
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 6bb76d5c817b..346592dca33c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -151,7 +151,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
{
char workarea[32];
ssize_t len;
- unsigned int scan_value;
+ int ret;
if (*ppos != 0)
return 0;
@@ -165,9 +165,8 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
return len;
workarea[len] = '\0';
- if (sscanf(workarea, "%x", &scan_value) == 1)
- *value = scan_value;
- else
+ ret = kstrtouint(workarea, 16, value);
+ if (ret)
return -EIO;
return len;
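
Unlike sscanf("%x", ...), kstrtouint() parses the whole string and returns 0
only on success (a negative errno such as -EINVAL or -ERANGE otherwise), so
trailing garbage is now rejected instead of silently ignored. The call
pattern, shown here in isolation:

    unsigned int value;
    int ret;

    ret = kstrtouint(workarea, 16, &value); /* base 16, full-string parse */
    if (ret)
            return -EIO;
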
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6f1c85956d50..1c5d62e8dab6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
- rdata = GET_DESC_DATA(ring, i);
+ rdata = XGBE_GET_DESC_DATA(ring, i);
xgbe_unmap_skb(pdata, rdata);
}
@@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
rdesc_dma = ring->rdesc_dma;
for (j = 0; j < ring->rdesc_count; j++) {
- rdata = GET_DESC_DATA(ring, j);
+ rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
@@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdesc_dma = ring->rdesc_dma;
for (j = 0; j < ring->rdesc_count; j++) {
- rdata = GET_DESC_DATA(ring, j);
+ rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
@@ -359,6 +359,15 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
rdata->len = 0;
rdata->interrupt = 0;
rdata->mapped_as_page = 0;
+
+ if (rdata->state_saved) {
+ rdata->state_saved = 0;
+ rdata->state.incomplete = 0;
+ rdata->state.context_next = 0;
+ rdata->state.skb = NULL;
+ rdata->state.len = 0;
+ rdata->state.error = 0;
+ }
}
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
@@ -392,7 +401,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
if ((tso && (packet->mss != ring->tx.cur_mss)) ||
(vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
cur_index++;
- rdata = GET_DESC_DATA(ring, cur_index);
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
if (tso) {
DBGPR(" TSO packet\n");
@@ -413,12 +422,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
packet->length += packet->header_len;
cur_index++;
- rdata = GET_DESC_DATA(ring, cur_index);
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
}
/* Map the (remainder of the) packet */
for (datalen = skb_headlen(skb) - offset; datalen; ) {
- len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+ len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
DMA_TO_DEVICE);
@@ -437,7 +446,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
packet->length += len;
cur_index++;
- rdata = GET_DESC_DATA(ring, cur_index);
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -447,7 +456,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
offset = 0;
for (datalen = skb_frag_size(frag); datalen; ) {
- len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+ len = min_t(unsigned int, datalen,
+ XGBE_TX_MAX_BUF_SIZE);
skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
len, DMA_TO_DEVICE);
@@ -468,7 +478,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
packet->length += len;
cur_index++;
- rdata = GET_DESC_DATA(ring, cur_index);
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
}
}
@@ -484,7 +494,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
while (start_index < cur_index) {
- rdata = GET_DESC_DATA(ring, start_index++);
+ rdata = XGBE_GET_DESC_DATA(ring, start_index++);
xgbe_unmap_skb(pdata, rdata);
}
@@ -507,7 +517,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
ring->rx.realloc_index);
for (i = 0; i < ring->dirty; i++) {
- rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
/* Reset rdata values */
xgbe_unmap_skb(pdata, rdata);
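
The GET_DESC_DATA -> XGBE_GET_DESC_DATA change is a pure namespacing rename;
the macro itself lives in xgbe.h and is not shown in this diff. A sketch of
its assumed shape: descriptor rings are sized to a power of two, so the index
wraps with a mask rather than a modulo:

    /* Assumed definition, for illustration; see xgbe.h for the real one. */
    #define XGBE_GET_DESC_DATA(_ring, _idx)                         \
            (&((_ring)->rdata[(_idx) & ((_ring)->rdesc_count - 1)]))
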
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 002293b0819d..edaca4496264 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -116,6 +116,8 @@
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -129,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
DBGPR("-->xgbe_usec_to_riwt\n");
- rate = clk_get_rate(pdata->sysclock);
+ rate = clk_get_rate(pdata->sysclk);
/*
* Convert the input usec value to the watchdog timer value. Each
@@ -152,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
DBGPR("-->xgbe_riwt_to_usec\n");
- rate = clk_get_rate(pdata->sysclock);
+ rate = clk_get_rate(pdata->sysclk);
/*
* Convert the input watchdog timer value to the usec value. Each
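
Both helpers convert between microseconds and receive-interrupt watchdog
(riwt) units using the renamed sysclk, where one riwt unit corresponds to a
fixed number of clock cycles (256 on this hardware, per the driver comments).
A worked example with an assumed 125 MHz system clock:

    rate = 125000000;                        /* assumed 125 MHz sysclk */
    /* 1 usec = 125 cycles, 1 riwt unit = 256 cycles */
    riwt = (30 * (rate / 1000000)) / 256;    /* 30 usec -> 14 units */
    usec = (14 * 256) / (rate / 1000000);    /* 14 units -> 28 usec */
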
@@ -245,7 +247,7 @@ static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
return 0;
@@ -255,7 +257,7 @@ static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
return 0;
@@ -266,7 +268,7 @@ static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
{
unsigned int i;
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
return 0;
@@ -277,7 +279,7 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
{
unsigned int i;
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
return 0;
@@ -341,12 +343,12 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
unsigned int i;
/* Clear MTL flow control */
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -366,12 +368,12 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
unsigned int i;
/* Set MTL flow control */
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -405,7 +407,9 @@ static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
- if (pdata->tx_pause)
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->tx_pause || (pfc && pfc->pfc_en))
xgbe_enable_tx_flow_control(pdata);
else
xgbe_disable_tx_flow_control(pdata);
@@ -415,7 +419,9 @@ static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
- if (pdata->rx_pause)
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->rx_pause || (pfc && pfc->pfc_en))
xgbe_enable_rx_flow_control(pdata);
else
xgbe_disable_rx_flow_control(pdata);
@@ -425,8 +431,13 @@ static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
+ struct ieee_pfc *pfc = pdata->pfc;
+
xgbe_config_tx_flow_control(pdata);
xgbe_config_rx_flow_control(pdata);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+ (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
@@ -484,14 +495,18 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
/* No MTL interrupts to be enabled */
- XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
}
}
static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
- /* No MAC interrupts to be enabled */
- XGMAC_IOWRITE(pdata, MAC_IER, 0);
+ unsigned int mac_ier = 0;
+
+ /* Enable Timestamp interrupt */
+ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
/* Enable all counter interrupts */
XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
@@ -547,24 +562,16 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
return 0;
}
-static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
- unsigned int am_mode)
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+ struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
- struct netdev_hw_addr *ha;
- unsigned int mac_reg;
unsigned int mac_addr_hi, mac_addr_lo;
u8 *mac_addr;
- unsigned int i;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
- i = 0;
- mac_reg = MAC_MACA1HR;
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
- netdev_for_each_uc_addr(ha, pdata->netdev) {
- mac_addr_lo = 0;
- mac_addr_hi = 0;
+ if (ha) {
mac_addr = (u8 *)&mac_addr_lo;
mac_addr[0] = ha->addr[0];
mac_addr[1] = ha->addr[1];
@@ -574,54 +581,93 @@ static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
mac_addr[0] = ha->addr[4];
mac_addr[1] = ha->addr[5];
- DBGPR(" adding unicast address %pM at 0x%04x\n",
- ha->addr, mac_reg);
+ DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
+ *mac_reg);
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+ }
- XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
- mac_reg += MAC_MACA_INC;
- XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
- mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+ *mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+ *mac_reg += MAC_MACA_INC;
+}
- i++;
- }
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int addn_macs;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
- if (!am_mode) {
- netdev_for_each_mc_addr(ha, pdata->netdev) {
- mac_addr_lo = 0;
- mac_addr_hi = 0;
- mac_addr = (u8 *)&mac_addr_lo;
- mac_addr[0] = ha->addr[0];
- mac_addr[1] = ha->addr[1];
- mac_addr[2] = ha->addr[2];
- mac_addr[3] = ha->addr[3];
- mac_addr = (u8 *)&mac_addr_hi;
- mac_addr[0] = ha->addr[4];
- mac_addr[1] = ha->addr[5];
-
- DBGPR(" adding multicast address %pM at 0x%04x\n",
- ha->addr, mac_reg);
-
- XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
-
- XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
- mac_reg += MAC_MACA_INC;
- XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
- mac_reg += MAC_MACA_INC;
-
- i++;
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xgbe_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xgbe_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
}
}
/* Clear remaining additional MAC address entries */
- for (; i < pdata->hw_feat.addn_mac; i++) {
- XGMAC_IOWRITE(pdata, mac_reg, 0);
- mac_reg += MAC_MACA_INC;
- XGMAC_IOWRITE(pdata, mac_reg, 0);
- mac_reg += MAC_MACA_INC;
+ while (addn_macs--)
+ xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+ u32 crc;
+ unsigned int i;
+
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
}
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+ hash_reg += MAC_HTR_INC;
+ }
+}
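/* Illustrative sketch, not part of the patch: standalone C that
 * recomputes the hash bucket chosen by xgbe_set_mac_hash_table() above.
 * The kernel's crc32_le() and bitrev32() are open-coded so the math can
 * be checked in userspace; the 256-entry table size is only an example
 * (hash_table_shift = 26 - (256 >> 7) = 24, so the top 8 bits of the
 * reflected CRC select the bit). */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++, x >>= 1)
		r = (r << 1) | (x & 1);
	return r;
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x0a, 0x95, 0x9d, 0x68, 0x16 };
	unsigned int shift = 26 - (256 >> 7);	/* 256-bin hash table */
	uint32_t crc = bitrev32(~crc32_le(~0u, addr, 6)) >> shift;

	printf("register word %u, bit %u\n", crc >> 5, crc & 0x1f);
	return 0;
}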
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xgbe_set_mac_hash_table(pdata);
+ else
+ xgbe_set_mac_addn_addrs(pdata);
+
return 0;
}
@@ -738,6 +784,89 @@ static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
return 0;
}
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Enable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Disable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+ return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 poly = CRCPOLY_LE;
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+ u32 crc;
+ u16 vid;
+ __le16 vid_le;
+ u16 vlan_hash_table = 0;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ /* Set the VLAN Hash Table filtering register */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+ return 0;
+}
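/* Illustrative sketch, not part of the patch: userspace recomputation of
 * the 16-bit VLAN hash built by the two functions above. Only the low 12
 * bits of the little-endian VID feed the reflected CRC-32, and
 * bitrev32(~crc) >> 28 reduces to reversing the low 4 bits of ~crc.
 * A little-endian host is assumed so the VID can be used directly. */
#include <stdint.h>
#include <stdio.h>

static uint16_t vlan_hash(const uint16_t *vids, int count)
{
	uint16_t table = 0;

	while (count--) {
		uint16_t vid_le = *vids++;
		uint32_t crc = ~0u;

		for (int i = 0; i < 12; i++) {	/* 12 VID bits, LSB first */
			uint32_t bit = (crc ^ (vid_le >> i)) & 1;

			crc = (crc >> 1) ^ (bit ? 0xedb88320 : 0);
		}
		/* bitrev32(~crc) >> 28: reverse the low 4 bits of ~crc */
		crc = ~crc;
		table |= 1u << (((crc & 1) << 3) | ((crc & 2) << 1) |
				((crc & 4) >> 1) | ((crc & 8) >> 3));
	}
	return table;
}

int main(void)
{
	uint16_t vids[] = { 1, 100 };

	printf("MAC_VLANHTR = %#06x\n", vlan_hash(vids, 2));
	return 0;
}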
+
static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -766,7 +895,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
- rdata = GET_DESC_DATA(ring, i);
+ rdata = XGBE_GET_DESC_DATA(ring, i);
rdesc = rdata->rdesc;
/* Initialize Tx descriptor
@@ -791,7 +920,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
/* Update the starting address of descriptor ring */
- rdata = GET_DESC_DATA(ring, start_index);
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
upper_32_bits(rdata->rdesc_dma));
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
@@ -848,7 +977,7 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
- rdata = GET_DESC_DATA(ring, i);
+ rdata = XGBE_GET_DESC_DATA(ring, i);
rdesc = rdata->rdesc;
/* Initialize Rx descriptor
@@ -882,20 +1011,194 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
/* Update the starting address of descriptor ring */
- rdata = GET_DESC_DATA(ring, start_index);
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
upper_32_bits(rdata->rdesc_dma));
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
lower_32_bits(rdata->rdesc_dma));
/* Update the Rx Descriptor Tail Pointer */
- rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
lower_32_bits(rdata->rdesc_dma));
DBGPR("<--rx_desc_init\n");
}
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+}
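/* Illustrative sketch, not part of the patch: the addend written above
 * paces a 32-bit fractional accumulator that drives the PTP sub-second
 * counter. A typical fine-adjustment step (the usual adjfreq-style
 * calculation; this helper name is made up for illustration) scales a
 * base addend by a parts-per-billion correction: */
#include <stdint.h>
#include <stdio.h>

static uint32_t ptp_scale_addend(uint32_t base_addend, int32_t ppb)
{
	/* addend' = base * (1e9 + ppb) / 1e9, done in 64 bits to avoid
	 * overflow; a positive ppb speeds the clock up */
	uint64_t adj = (uint64_t)base_addend * (1000000000LL + ppb);

	return (uint32_t)(adj / 1000000000ULL);
}

int main(void)
{
	/* +100 ppb against an assumed base addend of 0x80000000 */
	printf("%#010x\n", ptp_scale_addend(0x80000000u, 100));
	return 0;
}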
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr;
+ u64 nsec;
+
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+ unsigned int mac_tscr)
+{
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return 0;
+
+ /* Initialize time registers */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+ xgbe_set_tstamp_time(pdata, 0, 0);
+
+ /* Initialize the timecounter */
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int total_weight, min_weight, weight;
+ unsigned int i;
+
+ if (!ets)
+ return;
+
+ /* Set Tx to deficit weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+ /* Set Traffic Class algorithms */
+ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+ min_weight = total_weight / 100;
+ if (!min_weight)
+ min_weight = 1;
+
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ DBGPR(" TC%u using SP\n", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_SP);
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ weight = total_weight * ets->tc_tx_bw[i] / 100;
+ weight = clamp(weight, min_weight, total_weight);
+
+ DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+ weight);
+ break;
+ }
+ }
+}
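/* Illustrative sketch, not part of the patch: the DWRR quantum computed
 * above scales with MTU so one scheduling round covers roughly one frame
 * per traffic class, with each ETS class taking its bandwidth percentage
 * of that total, clamped to [total/100, total]. Worked example: */
#include <stdio.h>

static unsigned int clamp_uint(unsigned int v, unsigned int lo,
			       unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int mtu = 1500, tc_cnt = 4;
	unsigned int tc_tx_bw[4] = { 10, 20, 30, 40 };	/* percent */
	unsigned int total = mtu * tc_cnt;
	unsigned int min = total / 100 ? total / 100 : 1;

	for (unsigned int i = 0; i < tc_cnt; i++)
		printf("TC%u quantum %u\n", i,
		       clamp_uint(total * tc_tx_bw[i] / 100, min, total));
	return 0;
}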
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int mask, reg, reg_val;
+ unsigned int tc, prio;
+
+ if (!pfc || !ets)
+ return;
+
+ for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if ((pfc->pfc_en & (1 << prio)) &&
+ (ets->prio_tc[prio] == tc))
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ }
+
+ xgbe_config_flow_control(pdata);
+}
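/* Illustrative sketch, not part of the patch: each MTL_TCPMnR register
 * holds one 8-bit priority mask per traffic class, packed into byte
 * lanes. MTL_TCPM_TC_PER_REG is assumed to be 4 here (4 masks x 8 bits
 * per 32-bit register); that value is not shown in this hunk. */
#include <stdint.h>
#include <stdio.h>

#define TC_PER_REG 4	/* assumed value of MTL_TCPM_TC_PER_REG */

static uint32_t tcpm_insert(uint32_t reg_val, unsigned int tc, uint8_t mask)
{
	unsigned int shift = (tc % TC_PER_REG) << 3;	/* byte lane */

	reg_val &= ~(0xffu << shift);
	return reg_val | ((uint32_t)mask << shift);
}

int main(void)
{
	uint32_t reg = 0;

	reg = tcpm_insert(reg, 0, 0x03);	/* prios 0-1 -> TC0 */
	reg = tcpm_insert(reg, 1, 0x0c);	/* prios 2-3 -> TC1 */
	printf("MTL_TCPM0R = %#010x\n", reg);	/* 0x00000c03 */
	return 0;
}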
+
static void xgbe_pre_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -933,7 +1236,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
if (tx_coalesce && !channel->tx_timer_active)
ring->coalesce_count = 0;
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
/* Create a context descriptor if this is a TSO packet */
@@ -977,7 +1280,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
}
ring->cur++;
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
}
@@ -994,6 +1297,10 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
TX_NORMAL_DESC2_VLAN_INSERT);
+ /* Timestamp enablement check */
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
/* Set IC bit based on Tx coalescing settings */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
if (tx_coalesce && (!tx_frames ||
@@ -1034,7 +1341,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
ring->cur++;
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
/* Update buffer address */
@@ -1074,7 +1381,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
wmb();
/* Set OWN bit for the first descriptor */
- rdata = GET_DESC_DATA(ring, start_index);
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
@@ -1088,7 +1395,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
ring->cur++;
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
@@ -1113,11 +1420,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
+ struct net_device *netdev = channel->pdata->netdev;
unsigned int err, etlt;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
/* Check for data availability */
@@ -1128,6 +1436,25 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+ /* Timestamp Context Descriptor */
+ xgbe_get_rx_tstamp(packet, rdesc);
+
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 1);
+
/* Get the packet length */
rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
@@ -1153,7 +1480,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
if (!err || (err && !etlt)) {
- if (etlt == 0x09) {
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
VLAN_CTAG, 1);
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
@@ -1188,56 +1516,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}
-static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
- enum xgbe_int_state int_state)
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
{
unsigned int dma_ch_ier;
- if (int_state == XGMAC_INT_STATE_SAVE) {
- channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
- channel->saved_ier &= DMA_INTERRUPT_MASK;
- } else {
- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
- dma_ch_ier |= channel->saved_ier;
- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
- }
-}
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-static int xgbe_enable_int(struct xgbe_channel *channel,
- enum xgbe_int int_id)
-{
switch (int_id) {
- case XGMAC_INT_DMA_ISR_DC0IS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
- break;
case XGMAC_INT_DMA_CH_SR_TI:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_TPS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
break;
case XGMAC_INT_DMA_CH_SR_TBU:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RI:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RBU:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RPS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_FBE:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
break;
case XGMAC_INT_DMA_ALL:
- xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+ dma_ch_ier |= channel->saved_ier;
break;
default:
return -1;
}
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
return 0;
}
@@ -1246,42 +1566,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
{
unsigned int dma_ch_ier;
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
switch (int_id) {
- case XGMAC_INT_DMA_ISR_DC0IS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
- break;
case XGMAC_INT_DMA_CH_SR_TI:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_TPS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
break;
case XGMAC_INT_DMA_CH_SR_TBU:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RI:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RBU:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RPS:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_FBE:
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
break;
case XGMAC_INT_DMA_ALL:
- xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
-
- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
- dma_ch_ier &= ~DMA_INTERRUPT_MASK;
- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
break;
default:
return -1;
}
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
return 0;
}
@@ -1311,11 +1633,11 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
/* Poll Until Poll Condition */
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+ for (i = 0; i < pdata->tx_q_count; i++) {
count = 2000;
while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
@@ -1335,6 +1657,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
/* Set the System Bus mode */
XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}
static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
@@ -1342,23 +1665,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
unsigned int arcache, awcache;
arcache = 0;
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
awcache = 0;
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}
@@ -1366,14 +1689,15 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
unsigned int i;
- /* Set Tx to weighted round robin scheduling algorithm (when
- * traffic class is using ETS algorithm)
- */
+ /* Set Tx to weighted round robin scheduling algorithm */
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
- /* Set Tx traffic classes to strict priority algorithm */
- for (i = 0; i < XGBE_TC_CNT; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
/* Set Rx to strict priority algorithm */
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
@@ -1388,66 +1712,66 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
/* Calculate Tx/Rx fifo share per queue */
switch (fifo_size) {
case 0:
- q_fifo_size = FIFO_SIZE_B(128);
+ q_fifo_size = XGBE_FIFO_SIZE_B(128);
break;
case 1:
- q_fifo_size = FIFO_SIZE_B(256);
+ q_fifo_size = XGBE_FIFO_SIZE_B(256);
break;
case 2:
- q_fifo_size = FIFO_SIZE_B(512);
+ q_fifo_size = XGBE_FIFO_SIZE_B(512);
break;
case 3:
- q_fifo_size = FIFO_SIZE_KB(1);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(1);
break;
case 4:
- q_fifo_size = FIFO_SIZE_KB(2);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(2);
break;
case 5:
- q_fifo_size = FIFO_SIZE_KB(4);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(4);
break;
case 6:
- q_fifo_size = FIFO_SIZE_KB(8);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(8);
break;
case 7:
- q_fifo_size = FIFO_SIZE_KB(16);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(16);
break;
case 8:
- q_fifo_size = FIFO_SIZE_KB(32);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(32);
break;
case 9:
- q_fifo_size = FIFO_SIZE_KB(64);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(64);
break;
case 10:
- q_fifo_size = FIFO_SIZE_KB(128);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(128);
break;
case 11:
- q_fifo_size = FIFO_SIZE_KB(256);
+ q_fifo_size = XGBE_FIFO_SIZE_KB(256);
break;
}
q_fifo_size = q_fifo_size / queue_count;
/* Set the queue fifo size programmable value */
- if (q_fifo_size >= FIFO_SIZE_KB(256))
+ if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
- else if (q_fifo_size >= FIFO_SIZE_KB(128))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
- else if (q_fifo_size >= FIFO_SIZE_KB(64))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
- else if (q_fifo_size >= FIFO_SIZE_KB(32))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
- else if (q_fifo_size >= FIFO_SIZE_KB(16))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
- else if (q_fifo_size >= FIFO_SIZE_KB(8))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
- else if (q_fifo_size >= FIFO_SIZE_KB(4))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
- else if (q_fifo_size >= FIFO_SIZE_KB(2))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
- else if (q_fifo_size >= FIFO_SIZE_KB(1))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
- else if (q_fifo_size >= FIFO_SIZE_B(512))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
p_fifo = XGMAC_MTL_FIFO_SIZE_512;
- else if (q_fifo_size >= FIFO_SIZE_B(256))
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
p_fifo = XGMAC_MTL_FIFO_SIZE_256;
return p_fifo;
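/* Illustrative sketch, not part of the patch: worked example of the
 * per-queue fifo split above. The hardware reports total fifo size as a
 * power-of-two code (0 = 128 bytes), the driver divides it evenly across
 * queues, and TQS/RQS are programmed in 256-byte units; the encoded
 * value being size/256 - 1 is an assumption inferred from the
 * "(fifo_size + 1) * 256" in the netdev_notice calls below. */
#include <stdio.h>

int main(void)
{
	unsigned int hw_code = 7;		/* 16 KB total, per the table */
	unsigned int queues = 4;
	unsigned int total = 128u << hw_code;	/* code 0 = 128 bytes */
	unsigned int per_q = total / queues;
	unsigned int encoded = per_q / 256 - 1;

	printf("%u bytes/queue, TQS=%u -> %u bytes\n",
	       per_q, encoded, (encoded + 1) * 256);	/* 4096, 15, 4096 */
	return 0;
}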
@@ -1459,13 +1783,13 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
- pdata->hw_feat.tx_q_cnt);
+ pdata->tx_q_count);
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
- pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -1474,27 +1798,84 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
- pdata->hw_feat.rx_q_cnt);
+ pdata->rx_q_count);
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
- pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
}
-static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
- unsigned int i, reg, reg_val;
- unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int prio_queues;
+ unsigned int ppq, ppq_extra, prio;
+ unsigned int mask;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+
+ if (i < qptc_extra) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+ reg = MAC_RQC2R;
+ reg_val = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ if (i < ppq_extra) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ reg += MAC_RQC2_INC;
+ reg_val = 0;
+ }
/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
reg = MTL_RQDCM0R;
reg_val = 0;
- for (i = 0; i < q_count;) {
+ for (i = 0; i < pdata->rx_q_count;) {
reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
- if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
continue;
XGMAC_IOWRITE(pdata, reg, reg_val);
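/* Illustrative sketch, not part of the patch: the qptc/qptc_extra and
 * ppq/ppq_extra arithmetic above is a plain divide-with-remainder
 * spread, where the first (remainder) groups each get one extra entry.
 * For example, 6 Tx queues over 4 traffic classes: */
#include <stdio.h>

int main(void)
{
	unsigned int tx_q_count = 6, tc_cnt = 4;
	unsigned int qptc = tx_q_count / tc_cnt;	/* 1 */
	unsigned int extra = tx_q_count % tc_cnt;	/* 2 */
	unsigned int queue = 0;

	for (unsigned int tc = 0; tc < tc_cnt; tc++) {
		unsigned int n = qptc + (tc < extra ? 1 : 0);

		while (n--)
			printf("TXq%u -> TC%u\n", queue++, tc);
	}
	return 0;
}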
@@ -1508,7 +1889,7 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
unsigned int i;
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+ for (i = 0; i < pdata->rx_q_count; i++) {
/* Activate flow control when less than 4k left in fifo */
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
@@ -1520,6 +1901,13 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+ }
}
static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
@@ -1541,6 +1929,18 @@ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+ /* Set the current VLAN Hash Table register value */
+ xgbe_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ else
+ xgbe_disable_rx_vlan_filtering(pdata);
+
if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
xgbe_enable_rx_vlan_stripping(pdata);
else
@@ -1881,7 +2281,7 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
}
/* Enable each Tx queue */
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
MTL_Q_ENABLED);
@@ -1898,7 +2298,7 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
/* Disable each Tx queue */
- for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
/* Disable each Tx DMA channel */
@@ -1927,7 +2327,7 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
/* Enable each Rx queue */
reg_val = 0;
- for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ for (i = 0; i < pdata->rx_q_count; i++)
reg_val |= (0x02 << (i << 1));
XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
@@ -2061,9 +2461,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
* Initialize MTL related features
*/
xgbe_config_mtl_mode(pdata);
- xgbe_config_rx_queue_mapping(pdata);
- /*TODO: Program the priorities mapped to the Selected Traffic Classes
- in MTL_TC_Prty_Map0-3 registers */
+ xgbe_config_queue_mapping(pdata);
xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
@@ -2071,15 +2469,13 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_fifo_size(pdata);
xgbe_config_rx_fifo_size(pdata);
xgbe_config_flow_control_threshold(pdata);
- /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
+ xgbe_config_dcb_tc(pdata);
+ xgbe_config_dcb_pfc(pdata);
xgbe_enable_mtl_interrupts(pdata);
- /* Transmit Class Weight */
- XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
-
/*
* Initialize MAC related features
*/
@@ -2104,7 +2500,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
- hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+ hw_if->add_mac_addresses = xgbe_add_mac_addresses;
hw_if->set_mac_address = xgbe_set_mac_address;
hw_if->enable_rx_csum = xgbe_enable_rx_csum;
@@ -2112,6 +2508,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+ hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+ hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+ hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
hw_if->read_mmd_regs = xgbe_read_mmd_regs;
hw_if->write_mmd_regs = xgbe_write_mmd_regs;
@@ -2178,5 +2577,16 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+ /* For PTP config */
+ hw_if->config_tstamp = xgbe_config_tstamp;
+ hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+ hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+ hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+ hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+ /* For Data Center Bridging config */
+ hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+ hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index cfe3d93b5f52..1f5487f4888c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -121,6 +121,8 @@
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -144,9 +146,10 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if (rx_buf_size < RX_MIN_BUF_SIZE)
- rx_buf_size = RX_MIN_BUF_SIZE;
- rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+ if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
+ rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+ rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+ ~(XGBE_RX_BUF_ALIGN - 1);
return rx_buf_size;
}
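/* Illustrative sketch, not part of the patch: "(x + a - 1) & ~(a - 1)"
 * rounds x up to the next multiple of a for any power-of-two a, which is
 * the alignment idiom used above; XGBE_RX_BUF_ALIGN = 64 is assumed here
 * purely for the example. */
#include <stdio.h>

static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	/* 1500-byte MTU: 1500 + 14 (ETH_HLEN) + 4 (FCS) + 4 (VLAN) */
	printf("%u\n", align_up(1500 + 14 + 4 + 4, 64));	/* 1536 */
	return 0;
}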
@@ -155,16 +158,21 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
+ enum xgbe_int int_id;
unsigned int i;
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring)
- hw_if->enable_int(channel,
- XGMAC_INT_DMA_CH_SR_TI);
- if (channel->rx_ring)
- hw_if->enable_int(channel,
- XGMAC_INT_DMA_CH_SR_RI);
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->enable_int(channel, int_id);
}
}
@@ -172,16 +180,21 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
+ enum xgbe_int int_id;
unsigned int i;
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring)
- hw_if->disable_int(channel,
- XGMAC_INT_DMA_CH_SR_TI);
- if (channel->rx_ring)
- hw_if->disable_int(channel,
- XGMAC_INT_DMA_CH_SR_RI);
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->disable_int(channel, int_id);
}
}
@@ -191,7 +204,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr;
+ unsigned int mac_isr, mac_tssr;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -244,6 +257,17 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
hw_if->rx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+ mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+ if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+ /* Read Tx Timestamp to clear interrupt */
+ pdata->tx_tstamp =
+ hw_if->get_tx_tstamp(pdata);
+ schedule_work(&pdata->tx_tstamp_work);
+ }
+ }
}
DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
@@ -364,6 +388,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
HASHTBLSZ);
hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
@@ -377,6 +402,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
/* The Queue and Channel counts are zero based so increment them
* to get the actual number
*/
@@ -396,9 +436,12 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
napi_enable(&pdata->napi);
}
-static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -446,7 +489,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
break;
for (j = 0; j < ring->rdesc_count; j++) {
- rdata = GET_DESC_DATA(ring, j);
+ rdata = XGBE_GET_DESC_DATA(ring, j);
desc_if->unmap_skb(pdata, rdata);
}
}
@@ -471,7 +514,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
break;
for (j = 0; j < ring->rdesc_count; j++) {
- rdata = GET_DESC_DATA(ring, j);
+ rdata = XGBE_GET_DESC_DATA(ring, j);
desc_if->unmap_skb(pdata, rdata);
}
}
@@ -479,6 +522,114 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_skbuff\n");
}
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ int new_state = 0;
+
+ if (phydev == NULL)
+ return;
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct phy_device *phydev = pdata->phydev;
+ int ret;
+
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ return ret;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+ return ret;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->phydev)
+ return;
+
+ phy_disconnect(pdata->phydev);
+}
+
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -502,7 +653,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata);
+ xgbe_napi_disable(pdata, 0);
/* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
@@ -591,7 +742,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
phy_stop(pdata->phydev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata);
+ xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
@@ -639,6 +790,197 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+ unsigned long flags;
+
+ if (pdata->tx_tstamp) {
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ pdata->tx_tstamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ pdata->tx_tstamp_skb = NULL;
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
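/* Illustrative sketch, not part of the patch: the userspace side of the
 * SIOCSHWTSTAMP path parsed above, using the standard hwtstamp_config
 * UAPI to request Tx timestamping and all-packet Rx timestamping. The
 * interface name "eth0" is a placeholder. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}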
+
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ skb_tx_timestamp(skb);
+}
+
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (vlan_tx_tag_present(skb))
@@ -682,7 +1024,8 @@ static int xgbe_is_tso(struct sk_buff *skb)
return 1;
}
-static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, struct sk_buff *skb,
struct xgbe_packet_data *packet)
{
struct skb_frag_struct *frag;
@@ -724,16 +1067,21 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
VLAN_CTAG, 1);
}
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ PTP, 1);
+
for (len = skb_headlen(skb); len;) {
packet->rdesc_count++;
- len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
for (len = skb_frag_size(frag); len; ) {
packet->rdesc_count++;
- len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
}
}
}
@@ -747,26 +1095,38 @@ static int xgbe_open(struct net_device *netdev)
DBGPR("-->xgbe_open\n");
- /* Enable the clock */
- ret = clk_prepare_enable(pdata->sysclock);
- if (ret) {
- netdev_alert(netdev, "clk_prepare_enable failed\n");
+ /* Initialize the phy */
+ ret = xgbe_phy_init(pdata);
+ if (ret)
return ret;
+
+ /* Enable the clocks */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ netdev_alert(netdev, "dma clk_prepare_enable failed\n");
+ goto err_phy_init;
+ }
+
+ ret = clk_prepare_enable(pdata->ptpclk);
+ if (ret) {
+ netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+ goto err_sysclk;
}
/* Calculate the Rx buffer size before allocating rings */
ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
if (ret < 0)
- goto err_clk;
+ goto err_ptpclk;
pdata->rx_buf_size = ret;
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
if (ret)
- goto err_clk;
+ goto err_ptpclk;
- /* Initialize the device restart work struct */
+ /* Initialize the device restart and Tx timestamp work struct */
INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
/* Request interrupts */
ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
@@ -795,8 +1155,14 @@ err_start:
err_irq:
desc_if->free_ring_resources(pdata);
-err_clk:
- clk_disable_unprepare(pdata->sysclock);
+err_ptpclk:
+ clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+ clk_disable_unprepare(pdata->sysclk);
+
+err_phy_init:
+ xgbe_phy_exit(pdata);
return ret;
}
@@ -824,8 +1190,12 @@ static int xgbe_close(struct net_device *netdev)
pdata->irq_number = 0;
}
- /* Disable the clock */
- clk_disable_unprepare(pdata->sysclock);
+ /* Disable the clocks */
+ clk_disable_unprepare(pdata->ptpclk);
+ clk_disable_unprepare(pdata->sysclk);
+
+ /* Release the phy */
+ xgbe_phy_exit(pdata);
DBGPR("<--xgbe_close\n");
@@ -861,7 +1231,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Calculate preliminary packet info */
memset(packet, 0, sizeof(*packet));
- xgbe_packet_info(ring, skb, packet);
+ xgbe_packet_info(pdata, ring, skb, packet);
/* Check that there are enough descriptors available */
if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
@@ -885,6 +1255,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
goto tx_netdev_return;
}
+ xgbe_prep_tx_tstamp(pdata, skb, packet);
+
/* Configure required descriptor fields for transmission */
hw_if->pre_xmit(channel);
@@ -911,18 +1283,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
- if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
- pr_mode = 1;
- if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
- am_mode = 1;
- if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
- pdata->hw_feat.addn_mac)
- pr_mode = 1;
-
hw_if->set_promiscuous_mode(pdata, pr_mode);
hw_if->set_all_multicast_mode(pdata, am_mode);
- if (!pr_mode)
- hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+ hw_if->add_mac_addresses(pdata);
DBGPR("<--xgbe_set_rx_mode\n");
}
@@ -947,6 +1311,27 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ case SIOCSHWTSTAMP:
+ ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -999,6 +1384,38 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
return s;
}
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ set_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
@@ -1016,31 +1433,58 @@ static void xgbe_poll_controller(struct net_device *netdev)
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
+static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int offset, queue;
+ u8 i;
+
+ if (tc && (tc != pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (tc) {
+ netdev_set_num_tc(netdev, tc);
+ for (i = 0, queue = 0, offset = 0; i < tc; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
+
+ DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netdev_set_tc_queue(netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+ } else {
+ netdev_reset_tc(netdev);
+ }
+
+ return 0;
+}
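/* Illustrative sketch, not part of the patch: the loop above converts
 * the contiguous queue-to-TC map built in xgbe_config_queue_mapping()
 * into the (count, offset) pairs that netdev_set_tc_queue() expects.
 * Traced in userspace for q2tc_map = {0, 0, 1, 1, 2, 3}: */
#include <stdio.h>

int main(void)
{
	unsigned char q2tc_map[] = { 0, 0, 1, 1, 2, 3 };
	unsigned int tx_q_count = 6, tc = 4;
	unsigned int queue = 0, offset = 0;

	for (unsigned int i = 0; i < tc; i++) {
		while (queue < tx_q_count && q2tc_map[queue] == i)
			queue++;
		printf("TC%u: queues %u-%u (count %u, offset %u)\n",
		       i, offset, queue - 1, queue - offset, offset);
		offset = queue;
	}
	return 0;
}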
+
static int xgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int rxcsum_enabled, rxvlan_enabled;
+ unsigned int rxcsum, rxvlan, rxvlan_filter;
- rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
- rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
- if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
hw_if->enable_rx_csum(pdata);
- netdev_alert(netdev, "state change - rxcsum enabled\n");
- } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
hw_if->disable_rx_csum(pdata);
- netdev_alert(netdev, "state change - rxcsum disabled\n");
- }
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
- netdev_alert(netdev, "state change - rxvlan enabled\n");
- } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
hw_if->disable_rx_vlan_stripping(pdata);
- netdev_alert(netdev, "state change - rxvlan disabled\n");
- }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_if->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_if->disable_rx_vlan_filtering(pdata);
pdata->netdev_features = features;
@@ -1056,11 +1500,15 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_get_stats64 = xgbe_get_stats64,
+ .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xgbe_poll_controller,
#endif
+ .ndo_setup_tc = xgbe_setup_tc,
.ndo_set_features = xgbe_set_features,
};
@@ -1069,6 +1517,22 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
return (struct net_device_ops *)&xgbe_netdev_ops;
}
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
+ desc_if->realloc_skb(channel);
+
+ /* Update the Rx Tail Pointer Register with the address of
+ * the last cleaned entry */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+}
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1089,8 +1553,9 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
spin_lock_irqsave(&ring->lock, flags);
- while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
- rdata = GET_DESC_DATA(ring, ring->dirty);
+ while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+ (ring->dirty < ring->cur)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
if (!hw_if->tx_complete(rdesc))
@@ -1109,7 +1574,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
}
if ((ring->tx.queue_stopped == 1) &&
- (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+ (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
ring->tx.queue_stopped = 0;
netif_wake_subqueue(netdev, channel->queue_index);
}
@@ -1125,14 +1590,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
struct net_device *netdev = pdata->netdev;
struct sk_buff *skb;
- unsigned int incomplete, error;
- unsigned int cur_len, put_len, max_len;
+ struct skb_shared_hwtstamps *hwtstamps;
+ unsigned int incomplete, error, context_next, context;
+ unsigned int len, put_len, max_len;
int received = 0;
DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
@@ -1141,18 +1606,32 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (received < budget) {
DBGPR(" cur = %d\n", ring->cur);
- /* Clear the packet data information */
- memset(packet, 0, sizeof(*packet));
- skb = NULL;
- error = 0;
- cur_len = 0;
+ /* First time in the loop, see if we need to restore state */
+ if (!received && rdata->state_saved) {
+ incomplete = rdata->state.incomplete;
+ context_next = rdata->state.context_next;
+ skb = rdata->state.skb;
+ error = rdata->state.error;
+ len = rdata->state.len;
+ } else {
+ memset(packet, 0, sizeof(*packet));
+ incomplete = 0;
+ context_next = 0;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
read_again:
- rdata = GET_DESC_DATA(ring, ring->cur);
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
+ if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+ xgbe_rx_refresh(channel);
if (hw_if->dev_read(channel))
break;
@@ -1168,9 +1647,15 @@ read_again:
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
+ context_next = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT);
+ context = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT);
/* Earlier error, just drain the remaining data */
- if (incomplete && error)
+ if ((incomplete || context_next) && error)
goto read_again;
if (error || packet->errors) {
@@ -1180,30 +1665,37 @@ read_again:
continue;
}
- put_len = rdata->len - cur_len;
- if (skb) {
- if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
- DBGPR("pskb_expand_head error\n");
- if (incomplete) {
- error = 1;
- goto read_again;
+ if (!context) {
+ put_len = rdata->len - len;
+ if (skb) {
+ if (pskb_expand_head(skb, 0, put_len,
+ GFP_ATOMIC)) {
+ DBGPR("pskb_expand_head error\n");
+ if (incomplete) {
+ error = 1;
+ goto read_again;
+ }
+
+ dev_kfree_skb(skb);
+ continue;
}
-
- dev_kfree_skb(skb);
- continue;
+ memcpy(skb_tail_pointer(skb), rdata->skb->data,
+ put_len);
+ } else {
+ skb = rdata->skb;
+ rdata->skb = NULL;
}
- memcpy(skb_tail_pointer(skb), rdata->skb->data,
- put_len);
- } else {
- skb = rdata->skb;
- rdata->skb = NULL;
+ skb_put(skb, put_len);
+ len += put_len;
}
- skb_put(skb, put_len);
- cur_len += put_len;
- if (incomplete)
+ if (incomplete || context_next)
goto read_again;
+ /* Stray Context Descriptor? */
+ if (!skb)
+ continue;
+
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -1230,6 +1722,16 @@ read_again:
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_ctag);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ packet->rx_tstamp);
+ hwtstamps = skb_hwtstamps(skb);
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ }
+
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);
@@ -1239,14 +1741,15 @@ read_again:
napi_gro_receive(&pdata->napi, skb);
}
- if (received) {
- desc_if->realloc_skb(channel);
-
- /* Update the Rx Tail Pointer Register with address of
- * the last cleaned entry */
- rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdata->state_saved = 1;
+ rdata->state.incomplete = incomplete;
+ rdata->state.context_next = context_next;
+ rdata->state.skb = skb;
+ rdata->state.len = len;
+ rdata->state.error = error;
}
DBGPR("<--xgbe_rx_poll: received = %d\n", received);
@@ -1259,21 +1762,28 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
napi);
struct xgbe_channel *channel;
- int processed;
+ int ring_budget;
+ int processed, last_processed;
unsigned int i;
DBGPR("-->xgbe_poll: budget=%d\n", budget);
- /* Cleanup Tx ring first */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_tx_poll(channel);
-
- /* Process Rx ring next */
processed = 0;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- processed += xgbe_rx_poll(channel, budget - processed);
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xgbe_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
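Worked numbers for the new poll loop: with budget = 64 and rx_ring_count = 4, ring_budget starts at 16. Each pass first reaps Tx completions, then gives every Rx ring up to 16 descriptors, clamped to whatever overall budget remains; the do/while repeats while the rings keep producing work and budget is unspent, so one busy ring can absorb slack left by idle ones.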
/* If we processed everything, we are done */
if (processed < budget) {
@@ -1296,7 +1806,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
struct xgbe_ring_desc *rdesc;
while (count--) {
- rdata = GET_DESC_DATA(ring, idx);
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
(flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 8909f2b51af1..a076aca138a1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -116,6 +116,7 @@
#include <linux/spinlock.h>
#include <linux/phy.h>
+#include <linux/net_tstamp.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -289,13 +290,9 @@ static int xgbe_get_settings(struct net_device *netdev,
if (!pdata->phydev)
return -ENODEV;
- spin_lock_irq(&pdata->lock);
-
ret = phy_ethtool_gset(pdata->phydev, cmd);
cmd->transceiver = XCVR_EXTERNAL;
- spin_unlock_irq(&pdata->lock);
-
DBGPR("<--xgbe_get_settings\n");
return ret;
@@ -314,36 +311,32 @@ static int xgbe_set_settings(struct net_device *netdev,
if (!pdata->phydev)
return -ENODEV;
- spin_lock_irq(&pdata->lock);
-
speed = ethtool_cmd_speed(cmd);
- ret = -EINVAL;
if (cmd->phy_address != phydev->addr)
- goto unlock;
+ return -EINVAL;
if ((cmd->autoneg != AUTONEG_ENABLE) &&
(cmd->autoneg != AUTONEG_DISABLE))
- goto unlock;
+ return -EINVAL;
- if ((cmd->autoneg == AUTONEG_DISABLE) &&
- (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
- (cmd->duplex != DUPLEX_FULL)))
- goto unlock;
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_2500:
+ case SPEED_1000:
+ break;
+ default:
+ return -EINVAL;
+ }
- if (cmd->autoneg == AUTONEG_ENABLE) {
- /* Clear settings needed to force speeds */
- phydev->supported &= ~SUPPORTED_1000baseT_Full;
- phydev->supported &= ~SUPPORTED_10000baseT_Full;
- } else {
- /* Add settings needed to force speed */
- phydev->supported |= SUPPORTED_1000baseT_Full;
- phydev->supported |= SUPPORTED_10000baseT_Full;
+ if (cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
}
cmd->advertising &= phydev->supported;
if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
- goto unlock;
+ return -EINVAL;
ret = 0;
phydev->autoneg = cmd->autoneg;
@@ -359,9 +352,6 @@ static int xgbe_set_settings(struct net_device *netdev,
if (netif_running(netdev))
ret = phy_start_aneg(phydev);
-unlock:
- spin_unlock_irq(&pdata->lock);
-
DBGPR("<--xgbe_set_settings\n");
return ret;
@@ -490,6 +480,39 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
+static int xgbe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pdata->ptp_clock)
+ ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
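The capabilities reported here are what userspace retrieves through the ETHTOOL_GET_TS_INFO request (the ethtool -T query), which is how PTP daemons such as ptp4l discover the PHC index before opening the clock device.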
static const struct ethtool_ops xgbe_ethtool_ops = {
.get_settings = xgbe_get_settings,
.set_settings = xgbe_set_settings,
@@ -502,6 +525,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
+ .get_ts_info = xgbe_get_ts_info,
};
struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 5a1891faba8a..8aa6a9353f7b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -245,18 +245,19 @@ static int xgbe_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
mutex_init(&pdata->xpcs_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
/* Set and validate the number of descriptors for a ring */
- BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
- pdata->tx_desc_count = TX_DESC_CNT;
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
dev_err(dev, "tx descriptor count (%d) is not valid\n",
pdata->tx_desc_count);
ret = -EINVAL;
goto err_io;
}
- BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
- pdata->rx_desc_count = RX_DESC_CNT;
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
dev_err(dev, "rx descriptor count (%d) is not valid\n",
pdata->rx_desc_count);
@@ -265,10 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
}
/* Obtain the system clock setting */
- pdata->sysclock = devm_clk_get(dev, NULL);
- if (IS_ERR(pdata->sysclock)) {
- dev_err(dev, "devm_clk_get failed\n");
- ret = PTR_ERR(pdata->sysclock);
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->sysclk);
+ goto err_io;
+ }
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->ptpclk);
goto err_io;
}
@@ -294,8 +303,21 @@ static int xgbe_probe(struct platform_device *pdev)
/* Set the DMA mask */
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
- *(dev->dma_mask) = DMA_BIT_MASK(40);
- dev->coherent_dma_mask = DMA_BIT_MASK(40);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
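(The values chosen here correspond to the XGBE_DMA_OS_* and XGBE_DMA_SYS_* definitions in the xgbe.h hunk below: outer-shareable write-back/write-allocate AXI attributes when the bus is cache-coherent, and system-domain, cache-bypassing attributes when it is not.)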
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -336,9 +358,16 @@ static int xgbe_probe(struct platform_device *pdev)
/* Set default configuration data */
xgbe_default_config(pdata);
- /* Calculate the number of Tx and Rx rings to be created */
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues, so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1, so use the actual
+ * number of Rx queues
+ */
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
if (ret) {
dev_err(dev, "error setting real tx queue count\n");
@@ -348,6 +377,7 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->rx_ring_count = min_t(unsigned int,
netif_get_num_default_rss_queues(),
pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
if (ret) {
dev_err(dev, "error setting real rx queue count\n");
@@ -373,9 +403,12 @@ static int xgbe_probe(struct platform_device *pdev)
if (ret)
goto err_bus_id;
- /* Set network and ethtool operations */
+ /* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+ netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
/* Set device features */
netdev->hw_features = NETIF_F_SG |
@@ -386,7 +419,8 @@ static int xgbe_probe(struct platform_device *pdev)
NETIF_F_TSO6 |
NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -397,6 +431,8 @@ static int xgbe_probe(struct platform_device *pdev)
netdev->features |= netdev->hw_features;
pdata->netdev_features = netdev->features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
xgbe_init_rx_coalesce(pdata);
xgbe_init_tx_coalesce(pdata);
@@ -407,6 +443,8 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_reg_netdev;
}
+ xgbe_ptp_register(pdata);
+
xgbe_debugfs_init(pdata);
netdev_notice(netdev, "net device enabled\n");
@@ -439,6 +477,8 @@ static int xgbe_remove(struct platform_device *pdev)
xgbe_debugfs_exit(pdata);
+ xgbe_ptp_unregister(pdata);
+
unregister_netdev(netdev);
xgbe_mdio_unregister(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index ea7a5d6750ea..6d2221e023f4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -116,7 +116,6 @@
#include <linux/module.h>
#include <linux/kmod.h>
-#include <linux/spinlock.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
@@ -158,82 +157,6 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
return 0;
}
-static void xgbe_adjust_link(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct phy_device *phydev = pdata->phydev;
- unsigned long flags;
- int new_state = 0;
-
- if (phydev == NULL)
- return;
-
- DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
- phydev->addr, phydev->link, pdata->phy_link);
-
- spin_lock_irqsave(&pdata->lock, flags);
-
- if (phydev->link) {
- /* Flow control support */
- if (pdata->pause_autoneg) {
- if (phydev->pause || phydev->asym_pause) {
- pdata->tx_pause = 1;
- pdata->rx_pause = 1;
- } else {
- pdata->tx_pause = 0;
- pdata->rx_pause = 0;
- }
- }
-
- if (pdata->tx_pause != pdata->phy_tx_pause) {
- hw_if->config_tx_flow_control(pdata);
- pdata->phy_tx_pause = pdata->tx_pause;
- }
-
- if (pdata->rx_pause != pdata->phy_rx_pause) {
- hw_if->config_rx_flow_control(pdata);
- pdata->phy_rx_pause = pdata->rx_pause;
- }
-
- /* Speed support */
- if (phydev->speed != pdata->phy_speed) {
- new_state = 1;
-
- switch (phydev->speed) {
- case SPEED_10000:
- hw_if->set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- hw_if->set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- hw_if->set_gmii_speed(pdata);
- break;
- }
- pdata->phy_speed = phydev->speed;
- }
-
- if (phydev->link != pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 1;
- }
- } else if (pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 0;
- pdata->phy_speed = SPEED_UNKNOWN;
- }
-
- if (new_state)
- phy_print_status(phydev);
-
- spin_unlock_irqrestore(&pdata->lock, flags);
-
- DBGPR_MDIO("<--xgbe_adjust_link\n");
-}
-
void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
@@ -283,7 +206,6 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
int xgbe_mdio_register(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
struct device_node *phy_node;
struct mii_bus *mii;
struct phy_device *phydev;
@@ -298,7 +220,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
return -EINVAL;
}
- /* Register with the MDIO bus */
mii = mdiobus_alloc();
if (mii == NULL) {
dev_err(pdata->dev, "mdiobus_alloc failed\n");
@@ -353,32 +274,8 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
pdata->mii = mii;
pdata->mdio_mmd = MDIO_MMD_PCS;
- pdata->phy_link = -1;
- pdata->phy_speed = SPEED_UNKNOWN;
- pdata->phy_tx_pause = pdata->tx_pause;
- pdata->phy_rx_pause = pdata->rx_pause;
-
- ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
- pdata->phy_mode);
- if (ret) {
- netdev_err(netdev, "phy_connect_direct failed\n");
- goto err_phy_device;
- }
-
- if (!phydev->drv || (phydev->drv->phy_id == 0)) {
- netdev_err(netdev, "phy_id not valid\n");
- ret = -ENODEV;
- goto err_phy_connect;
- }
- DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
- dev_name(&phydev->dev), phydev->link);
-
phydev->autoneg = pdata->default_autoneg;
if (phydev->autoneg == AUTONEG_DISABLE) {
- /* Add settings needed to force speed */
- phydev->supported |= SUPPORTED_1000baseT_Full;
- phydev->supported |= SUPPORTED_10000baseT_Full;
-
phydev->speed = pdata->default_speed;
phydev->duplex = DUPLEX_FULL;
@@ -395,9 +292,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
return 0;
-err_phy_connect:
- phy_disconnect(phydev);
-
err_phy_device:
phy_device_free(phydev);
@@ -417,7 +311,6 @@ void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_mdio_unregister\n");
- phy_disconnect(pdata->phydev);
pdata->phydev = NULL;
module_put(pdata->phy_module);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
new file mode 100644
index 000000000000..37e64cfa5718
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -0,0 +1,285 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+{
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+ tstamp_cc);
+ u64 nsec;
+
+ nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+ return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 adjust;
+ u32 addend, diff;
+ unsigned int neg_adjust = 0;
+
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ adjust = pdata->tstamp_addend;
+ adjust *= delta;
+ diff = div_u64(adjust, 1000000000UL);
+
+ addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+ pdata->tstamp_addend + diff;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ nsec += delta;
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ *ts = ns_to_timespec(nsec);
+
+ return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+{
+ struct ptp_clock_info *info = &pdata->ptp_clock_info;
+ struct ptp_clock *clock;
+ struct cyclecounter *cc = &pdata->tstamp_cc;
+ u64 dividend;
+
+ snprintf(info->name, sizeof(info->name), "%s",
+ netdev_name(pdata->netdev));
+ info->owner = THIS_MODULE;
+ info->max_adj = clk_get_rate(pdata->ptpclk);
+ info->adjfreq = xgbe_adjfreq;
+ info->adjtime = xgbe_adjtime;
+ info->gettime = xgbe_gettime;
+ info->settime = xgbe_settime;
+ info->enable = xgbe_enable;
+
+ clock = ptp_clock_register(info, pdata->dev);
+ if (IS_ERR(clock)) {
+ dev_err(pdata->dev, "ptp_clock_register failed\n");
+ return;
+ }
+
+ pdata->ptp_clock = clock;
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / 50MHz)
+ * = (2^32 * 50MHz) / PTP ref clock
+ */
+ dividend = 50000000;
+ dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+
+ /* Setup the timecounter */
+ cc->read = xgbe_cc_read;
+ cc->mask = CLOCKSOURCE_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Disable all timestamping to start */
+ XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+ pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+ if (pdata->ptp_clock)
+ ptp_clock_unregister(pdata->ptp_clock);
+}
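Two details in the file above are worth pinning down with numbers. First, cc->mult = 1 and cc->shift = 0 make the cyclecounter an identity transform: get_tstamp_time already returns nanoseconds, so the timecounter contributes only the epoch offset and wrap handling. Second, the addend formula can be sanity-checked on the host; the sketch below assumes a 125MHz PTP reference clock purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t refclk = 125000000ULL;	/* assumed PTP ref clock rate */
    	uint64_t addend = (50000000ULL << 32) / refclk;

    	/* 2^32 * 50e6 / 125e6 = 0x66666666, so the 32-bit accumulator
    	 * overflows on 2 of every 5 input cycles, i.e. at an effective
    	 * 50MHz, and each overflow adds SSINC = 20 ns. */
    	printf("addend   = 0x%llx\n", (unsigned long long)addend);

    	/* xgbe_adjfreq then scales the addend by delta in parts per
    	 * billion: diff = addend * delta / 1e9. */
    	uint64_t diff = addend * 100ULL / 1000000000ULL;	/* +100 ppb */
    	printf("adjusted = 0x%llx\n", (unsigned long long)(addend + diff));
    	return 0;
    }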
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ab0627162c01..07bf70a82908 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -121,6 +121,12 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
@@ -128,22 +134,30 @@
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
-#define TX_DESC_CNT 512
-#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
-#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
-#define RX_DESC_CNT 512
+#define XGBE_TX_DESC_CNT 512
+#define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT 512
-#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
-#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define RX_BUF_ALIGN 64
+#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN 64
#define XGBE_MAX_DMA_CHANNELS 16
-#define DMA_ARDOMAIN_SETTING 0x2
-#define DMA_ARCACHE_SETTING 0xb
-#define DMA_AWDOMAIN_SETTING 0x2
-#define DMA_AWCACHE_SETTING 0x7
-#define DMA_INTERRUPT_MASK 0x31c7
+#define XGBE_MAX_QUEUES 16
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN 0x2
+#define XGBE_DMA_OS_ARCACHE 0xb
+#define XGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN 0x3
+#define XGBE_DMA_SYS_ARCACHE 0x0
+#define XGBE_DMA_SYS_AWCACHE 0x0
+
+#define XGBE_DMA_INTERRUPT_MASK 0x31c7
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
@@ -151,45 +165,53 @@
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
-#define MAX_MULTICAST_LIST 14
-#define TX_FLAGS_IP_PKT 0x00000001
-#define TX_FLAGS_TCP_PKT 0x00000002
-
/* MDIO bus phy name */
#define XGBE_PHY_NAME "amd_xgbe_phy"
#define XGBE_PRTAD 0
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK "dma_clk"
+#define XGBE_PTP_CLOCK "ptp_clk"
+
+/* Timestamp support - values based on 50MHz PTP clock
+ * 50MHz => 20 nsec
+ */
+#define XGBE_TSTAMP_SSINC 20
+#define XGBE_TSTAMP_SNSINC 0
+
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
-#define FIFO_SIZE_B(x) (x)
-#define FIFO_SIZE_KB(x) (x * 1024)
+#define XGBE_FIFO_SIZE_B(x) (x)
+#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
-#define XGBE_TC_CNT 2
+#define XGBE_TC_MIN_QUANTUM 10
/* Helper macro for descriptor handling
- * Always use GET_DESC_DATA to access the descriptor data
+ * Always use XGBE_GET_DESC_DATA to access the descriptor data
* since the index is free-running and needs to be and-ed
* with the descriptor count value of the ring to index to
* the proper descriptor data.
*/
-#define GET_DESC_DATA(_ring, _idx) \
+#define XGBE_GET_DESC_DATA(_ring, _idx) \
((_ring)->rdata + \
((_idx) & ((_ring)->rdesc_count - 1)))
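Because rdesc_count is enforced to be a power of two at probe time, the and-mask is a cheap modulo: on a 512-entry ring a free-running index of 515 resolves to XGBE_GET_DESC_DATA(ring, 515) == &ring->rdata[3].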
/* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS 100
-#define XGMAC_INIT_DMA_TX_FRAMES 16
+#define XGMAC_INIT_DMA_TX_USECS 50
+#define XGMAC_INIT_DMA_TX_FRAMES 25
#define XGMAC_MAX_DMA_RIWT 0xff
-#define XGMAC_INIT_DMA_RX_USECS 100
-#define XGMAC_INIT_DMA_RX_FRAMES 16
+#define XGMAC_INIT_DMA_RX_USECS 30
+#define XGMAC_INIT_DMA_RX_FRAMES 25
/* Flow control queue count */
#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE 8
struct xgbe_prv_data;
@@ -207,6 +229,8 @@ struct xgbe_packet_data {
unsigned short mss;
unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
};
/* Common Rx and Tx descriptor mapping */
@@ -219,7 +243,7 @@ struct xgbe_ring_desc {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
@@ -235,6 +259,20 @@ struct xgbe_ring_data {
unsigned int interrupt; /* Interrupt indicator */
unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet, the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ unsigned int incomplete;
+ unsigned int context_next;
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
};
struct xgbe_ring {
@@ -250,7 +288,7 @@ struct xgbe_ring {
unsigned int rdesc_count;
/* Array of descriptor data corresponding the descriptor memory
- * (always use the GET_DESC_DATA macro to access this data)
+ * (always use the XGBE_GET_DESC_DATA macro to access this data)
*/
struct xgbe_ring_data *rdata;
@@ -304,13 +342,13 @@ struct xgbe_channel {
} ____cacheline_aligned;
enum xgbe_int {
- XGMAC_INT_DMA_ISR_DC0IS,
XGMAC_INT_DMA_CH_SR_TI,
XGMAC_INT_DMA_CH_SR_TPS,
XGMAC_INT_DMA_CH_SR_TBU,
XGMAC_INT_DMA_CH_SR_RI,
XGMAC_INT_DMA_CH_SR_RBU,
XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_TI_RI,
XGMAC_INT_DMA_CH_SR_FBE,
XGMAC_INT_DMA_ALL,
};
@@ -386,7 +424,7 @@ struct xgbe_hw_if {
int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
- int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+ int (*add_mac_addresses)(struct xgbe_prv_data *);
int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
int (*enable_rx_csum)(struct xgbe_prv_data *);
@@ -394,6 +432,9 @@ struct xgbe_hw_if {
int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*update_vlan_hash_table)(struct xgbe_prv_data *);
int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
@@ -457,6 +498,18 @@ struct xgbe_hw_if {
void (*rx_mmc_int)(struct xgbe_prv_data *);
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+ /* For Timestamp config */
+ int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+ void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+ void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+ unsigned int nsec);
+ u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+ u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+ /* For Data Center Bridging config */
+ void (*config_dcb_tc)(struct xgbe_prv_data *);
+ void (*config_dcb_pfc)(struct xgbe_prv_data *);
};
struct xgbe_desc_if {
@@ -498,6 +551,7 @@ struct xgbe_hw_features {
unsigned int tso; /* TCP Segmentation Offload */
unsigned int dma_debug; /* DMA Debug Registers */
unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
unsigned int hash_table_size; /* Hash Table Size */
unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
@@ -530,6 +584,11 @@ struct xgbe_prv_data {
struct xgbe_hw_if hw_if;
struct xgbe_desc_if desc_if;
+ /* AXI DMA settings */
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int channel_count;
@@ -538,6 +597,9 @@ struct xgbe_prv_data {
unsigned int rx_ring_count;
unsigned int rx_desc_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
/* Tx/Rx common settings */
unsigned int pblx8;
@@ -589,8 +651,30 @@ struct xgbe_prv_data {
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
- /* System clock value used for Rx watchdog */
- struct clk *sysclock;
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ struct clk *sysclk;
+ struct clk *ptpclk;
+
+ /* Timestamp support */
+ spinlock_t tstamp_lock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_clock *ptp_clock;
+ struct hwtstamp_config tstamp_config;
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ unsigned int tstamp_addend;
+ struct work_struct tx_tstamp_work;
+ struct sk_buff *tx_tstamp_skb;
+ u64 tx_tstamp;
+
+ /* DCB support */
+ struct ieee_ets *ets;
+ struct ieee_pfc *pfc;
+ unsigned int q2tc_map[XGBE_MAX_QUEUES];
+ unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
/* Hardware features of the device */
struct xgbe_hw_features hw_feat;
@@ -617,10 +701,15 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
struct net_device_ops *xgbe_get_netdev_ops(void);
struct ethtool_ops *xgbe_get_ethtool_ops(void);
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
+#endif
int xgbe_mdio_register(struct xgbe_prv_data *);
void xgbe_mdio_unregister(struct xgbe_prv_data *);
void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_ptp_register(struct xgbe_prv_data *);
+void xgbe_ptp_unregister(struct xgbe_prv_data *);
void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
unsigned int);
void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,