Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
29 files changed, 2056 insertions, 1160 deletions
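The recurring pattern in this patch -- visible in ixgbe_setup_sfp_modules_82599(), ixgbe_start_mac_link_82599(), ixgbe_setup_mac_link_82599(), ixgbe_setup_fc() and the LED blink helpers in the diff below -- is that every AUTOC register write (and ixgbe_reset_pipeline_82599(), which also writes AUTOC) is now bracketed by the SW/FW MAC CSR semaphore whenever LESM firmware is enabled. The following is a minimal standalone sketch of that bracketing pattern only; the register offsets, the fake register file and the stub helpers are illustrative assumptions, not the driver's real definitions.

/*
 * Sketch of the "take SW/FW semaphore around AUTOC writes if LESM is on"
 * pattern added throughout this patch.  All types and helpers here are
 * simplified stand-ins for the real ixgbe structures and mac.ops callbacks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef uint32_t u32;

/* Placeholder offsets/masks for illustration, not the real register map. */
#define SKETCH_AUTOC		0x10
#define SKETCH_GSSR_MAC_CSR_SM	0x08

struct sketch_hw { u32 regs[64]; };	/* fake register file */

static u32 rd32(struct sketch_hw *hw, u32 reg) { return hw->regs[reg]; }
static void wr32(struct sketch_hw *hw, u32 reg, u32 val) { hw->regs[reg] = val; }

/* Stand-ins for ixgbe_verify_lesm_fw_enabled_82599() and the swfw_sync ops. */
static bool lesm_fw_enabled(struct sketch_hw *hw) { (void)hw; return true; }
static s32 acquire_swfw_sync(struct sketch_hw *hw, u32 mask) { (void)hw; (void)mask; return 0; }
static void release_swfw_sync(struct sketch_hw *hw, u32 mask) { (void)hw; (void)mask; }
static s32 reset_pipeline(struct sketch_hw *hw) { (void)hw; return 0; }

/* Write a new AUTOC value, holding the MAC CSR semaphore if LESM is active. */
static s32 write_autoc_locked(struct sketch_hw *hw, u32 autoc)
{
	bool got_lock = false;
	s32 status = 0;

	if (lesm_fw_enabled(hw)) {
		status = acquire_swfw_sync(hw, SKETCH_GSSR_MAC_CSR_SM);
		if (status)
			return status;
		got_lock = true;
	}

	wr32(hw, SKETCH_AUTOC, autoc);
	status = reset_pipeline(hw);	/* also writes AUTOC, so needs the same lock */

	if (got_lock)
		release_swfw_sync(hw, SKETCH_GSSR_MAC_CSR_SM);

	return status;
}

int main(void)
{
	struct sketch_hw hw = { { 0 } };

	if (!write_autoc_locked(&hw, 0xC000))
		printf("AUTOC now 0x%08x\n", rd32(&hw, SKETCH_AUTOC));
	return 0;
}

The actual hunks below follow this shape but keep the lock acquisition inline in each caller, since only 82599 parts with LESM firmware require it.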
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 89f40e51fc13..be2989e60009 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2012 Intel Corporation. +# Copyright(c) 1999 - 2013 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -32,13 +32,13 @@ obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o -ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o +ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 30efc9f0f47a..a8e10cff7a89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -35,12 +35,11 @@ #include <linux/cpumask.h> #include <linux/aer.h> #include <linux/if_vlan.h> +#include <linux/jiffies.h> -#ifdef CONFIG_IXGBE_PTP #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#endif /* CONFIG_IXGBE_PTP */ #include "ixgbe_type.h" #include "ixgbe_common.h" @@ -93,21 +92,26 @@ */ #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 -#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) - /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define IXGBE_TX_FLAGS_CSUM (u32)(1) -#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3) -#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4) -#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) -#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) -#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) -#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) -#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) +enum ixgbe_tx_flags { + /* cmd_type flags */ + IXGBE_TX_FLAGS_HW_VLAN = 0x01, + IXGBE_TX_FLAGS_TSO = 0x02, + IXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IXGBE_TX_FLAGS_CC = 0x08, + IXGBE_TX_FLAGS_IPV4 = 0x10, + IXGBE_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + IXGBE_TX_FLAGS_SW_VLAN = 0x40, + IXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -135,6 +139,7 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; + unsigned int vf_api; }; struct vf_macvlans { @@ -151,7 +156,7 @@ struct vf_macvlans { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ @@ -196,6 +201,7 @@ struct ixgbe_rx_queue_stats { enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_RX_RSC_ENABLED, @@ -225,6 +231,7 @@ struct ixgbe_ring { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; + unsigned long last_rx_timestamp; unsigned long state; u8 __iomem *tail; dma_addr_t dma; /* phys. 
address of descriptor ring */ @@ -272,15 +279,10 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 64 -#ifdef IXGBE_FCOE +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ #define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#else -#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES -#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES -#endif /* IXGBE_FCOE */ +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -482,8 +484,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12) /* Tx fast path data */ int num_tx_queues; @@ -571,17 +574,17 @@ struct ixgbe_adapter { u32 interrupt_event; u32 led_reg; -#ifdef CONFIG_IXGBE_PTP struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; - int rx_hwtstamp_filter; u32 base_incval; - u32 cycle_speed; -#endif /* CONFIG_IXGBE_PTP */ /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -600,6 +603,8 @@ struct ixgbe_adapter { #ifdef CONFIG_DEBUG_FS struct dentry *ixgbe_dbg_adapter; #endif /*CONFIG_DEBUG_FS*/ + + u8 default_up; }; struct ixgbe_fdir_filter { @@ -615,6 +620,7 @@ enum ixgbe_state_t { __IXGBE_DOWN, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, + __IXGBE_READ_I2C, }; struct ixgbe_cb { @@ -691,11 +697,12 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); +extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); extern void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); -extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); #endif +extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_do_reset(struct net_device *netdev); #ifdef CONFIG_IXGBE_HWMON @@ -739,19 +746,35 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) return netdev_get_tx_queue(ring->netdev, ring->queue_index); } -#ifdef CONFIG_IXGBE_PTP extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); -extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); +extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector 
*q_vector, + struct sk_buff *skb); +static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + return; + + __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); + + /* + * Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; +} + extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd); extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); -#endif /* CONFIG_IXGBE_PTP */ +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); +#endif #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 42537336110c..d0113fc97b6f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,7 +41,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); @@ -633,15 +632,15 @@ out: * ixgbe_setup_mac_link_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if auto-negotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { + bool autoneg = false; s32 status = 0; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -685,20 +684,18 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); @@ -1006,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) } /** - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. 
* @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr * @eeprom_data: value read * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + * Performs 8 byte read operation to SFP module's data over I2C interface. **/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) { s32 status = 0; u16 sfp_addr = 0; @@ -1028,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. */ - sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; + sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, @@ -1060,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, *eeprom_data = (u8)(sfp_data >> 8); } else { status = IXGBE_ERR_PHY; - goto out; } out: @@ -1068,6 +1065,36 @@ out: } /** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type * @hw: pointer to hardware structure * @@ -1300,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, .check_overtemp = &ixgbe_tn_check_overtemp, }; @@ -1311,4 +1339,3 @@ struct ixgbe_info ixgbe_82598_info = { .eeprom_ops = &eeprom_ops_82598, .phy_ops = &phy_ops_82598, }; - diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 1077cb2b38db..203a00c24330 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -45,24 +45,19 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { @@ -99,9 +94,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; - u32 reg_anlp1 = 0; - u32 i = 0; u16 list_offset, data_offset, data_value; + bool got_lock = false; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -137,28 +131,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); - /* Now restart DSP by setting Restart_AN and clearing LMS */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, - IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | - IXGBE_AUTOC_AN_RESTART)); - - /* Wait for AN to leave state 0 */ - for (i = 0; i < 10; i++) { - usleep_range(4000, 8000); - reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) - break; + /* Need SW/FW semaphore around AUTOC writes if LESM on, + * likewise reset_pipeline requires lock as it also writes + * AUTOC. + */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto setup_sfp_out; + + got_lock = true; + } + + /* Restart DSP and set SFI mode */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL)); + + ret_val = ixgbe_reset_pipeline_82599(hw); + + if (got_lock) { + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + got_lock = false; } - if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { - hw_dbg(hw, "sfp module setup not complete\n"); + + if (ret_val) { + hw_dbg(hw, " sfp module setup not complete\n"); ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; goto setup_sfp_out; } - - /* Restart DSP by setting Restart_AN and return to SFI mode */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, - IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | - IXGBE_AUTOC_AN_RESTART)); } setup_sfp_out: @@ -228,13 +230,13 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) * ixgbe_get_link_capabilities_82599 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed - * @negotiation: true when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. 
**/ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *negotiation) + bool *autoneg) { s32 status = 0; u32 autoc = 0; @@ -245,7 +247,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; goto out; } @@ -262,22 +264,22 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_10G_SERIAL: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; + *autoneg = false; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: @@ -289,7 +291,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: @@ -300,12 +302,12 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; break; case IXGBE_AUTOC_LMS_SGMII_1G_100M: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; - *negotiation = false; + *autoneg = false; break; default: @@ -317,7 +319,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; + *autoneg = true; } out: @@ -394,14 +396,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, u32 links_reg; u32 i; s32 status = 0; + bool got_lock = false; + + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status) + goto out; + + got_lock = true; + } /* Restart link */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == @@ -425,6 +439,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, /* Add delay to filter out noises during initial link setup */ msleep(50); +out: return status; } @@ -491,14 +506,12 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. 
**/ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status = 0; @@ -508,11 +521,11 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); u32 i = 0; bool link_up = false; - bool negotiation; + bool autoneg = false; /* Mask off requested but non-supported speeds */ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, - &negotiation); + &autoneg); if (status != 0) return status; @@ -545,7 +558,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_10GB_FULL, - autoneg, autoneg_wait_to_complete); if (status != 0) return status; @@ -598,7 +610,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_1GB_FULL, - autoneg, autoneg_wait_to_complete); if (status != 0) return status; @@ -627,7 +638,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, if (speedcnt > 1) status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, - autoneg, autoneg_wait_to_complete); out: @@ -647,13 +657,12 @@ out: * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Implements the Intel SmartSpeed algorithm. **/ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, + ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status = 0; @@ -684,7 +693,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* First, try to get link with full advertisement */ hw->phy.smart_speed_active = false; for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; @@ -719,7 +728,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* Turn SmartSpeed on to disable KR support */ hw->phy.smart_speed_active = true; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; @@ -745,7 +754,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, /* We didn't get link. Turn SmartSpeed back off. */ hw->phy.smart_speed_active = false; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); out: @@ -759,14 +768,13 @@ out: * ixgbe_setup_mac_link_82599 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. 
**/ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { s32 status = 0; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -779,6 +787,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, u32 links_reg; u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + bool got_lock = false; + bool autoneg = false; /* Check to see if speed passed in is supported. */ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, @@ -836,9 +846,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, } if (autoc != start_autoc) { + /* Need SW/FW semaphore around AUTOC writes if LESM is on, + * likewise reset_pipeline requires us to hold this lock as + * it also writes to AUTOC. + */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status != 0) + goto out; + + got_lock = true; + } + /* Restart link */ - autoc |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -874,20 +901,18 @@ out: * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Restarts link on PHY and MAC based on settings passed in. **/ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); @@ -994,9 +1019,28 @@ mac_reset_top: hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = true; } else { - if (autoc != hw->mac.orig_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | - IXGBE_AUTOC_AN_RESTART)); + if (autoc != hw->mac.orig_autoc) { + /* Need SW/FW semaphore around AUTOC writes if LESM is + * on, likewise reset_pipeline requires us to hold + * this lock as it also writes to AUTOC. + */ + bool got_lock = false; + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status) + goto reset_hw_out; + + got_lock = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { @@ -1022,7 +1066,7 @@ mac_reset_top: hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + if (is_valid_ether_addr(hw->mac.san_addr)) { hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, hw->mac.san_addr, 0, IXGBE_RAH_AV); @@ -1983,7 +2027,7 @@ fw_version_out: * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
**/ -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; @@ -2080,6 +2124,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, return ret_val; } +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. Note - We must hold the SW/FW semaphore before writing + * to AUTOC, so this function assumes the semaphore is held. + **/ +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 i, autoc_reg, ret_val; + s32 anlp1_reg = 0; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + usleep_range(4000, 8000); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = 0; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -2153,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index dbf37e4a45fd..99e472ebaa75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -65,13 +65,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); * function check the device id to see if the associated phy supports * autoneg flow control. **/ -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: - return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; default: @@ -90,6 +89,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) s32 ret_val = 0; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; + bool got_lock = false; /* * Validate the requested mode. 
Strict IEEE mode does not allow @@ -210,8 +210,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * */ if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_AN_RESTART; + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on, likewise reset_pipeline requries the lock as + * it also writes AUTOC. + */ + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && (ixgbe_device_supports_autoneg_fc(hw) == 0)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, @@ -1762,30 +1783,6 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) } /** - * ixgbe_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address. - * - * Tests a MAC address to ensure it is a valid Individual Address - **/ -s32 ixgbe_validate_mac_addr(u8 *mac_addr) -{ - s32 status = 0; - - /* Make sure it is not a multicast address */ - if (IXGBE_IS_MULTICAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Not a broadcast address */ - else if (IXGBE_IS_BROADCAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Reject the zero address */ - else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) - status = IXGBE_ERR_INVALID_MAC_ADDR; - - return status; -} - -/** * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write @@ -1889,8 +1886,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (ixgbe_validate_mac_addr(hw->mac.addr) == - IXGBE_ERR_INVALID_MAC_ADDR) { + if (!is_valid_ether_addr(hw->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); @@ -2617,6 +2613,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = 0; /* * Link must be up to auto-blink the LEDs; @@ -2625,10 +2622,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on. 
+ */ + bool got_lock = false; + + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); IXGBE_WRITE_FLUSH(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); usleep_range(10000, 20000); } @@ -2637,7 +2652,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** @@ -2649,18 +2665,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = 0; + bool got_lock = false; + + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on. + */ + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index d813d1188c36..bc3948ead6e0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -78,9 +78,9 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); @@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 9bc17c0cb972..1f2c805684dd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 1f4108ee154b..1634de8b627f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 87592b458c9c..ac780770863d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index ba835708fcac..3164f5453b8f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 4eac80d01857..05e23b80b5e3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index 4dec47faeb00..a4ef07631d1e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f1e002d5fa8f..f3d68f9696ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -30,6 +30,7 @@ #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 @@ -301,7 +302,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } -#ifdef IXGBE_FCOE static void ixgbe_dcbnl_devreset(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -320,7 +320,6 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) clear_bit(__IXGBE_RESETTING, &adapter->state); } -#endif static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { @@ -450,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { @@ -461,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: - rval = -EINVAL; + return -EINVAL; break; } } else { - rval = -EINVAL; + return -EINVAL; } - return rval; + return 0; } static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) @@ -541,6 +539,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err = 0; __u8 max_tc = 0; + __u8 map_chg = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; @@ -550,15 +549,22 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; - } - memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->ixgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) + map_chg = 1; } + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + if (max_tc) max_tc++; @@ -567,6 +573,8 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (max_tc != netdev_get_num_tc(dev)) err = ixgbe_setup_tc(dev, max_tc); + else if (map_chg) + ixgbe_dcbnl_devreset(dev); if (err) goto err_out; @@ -643,9 +651,11 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, return err; err = dcb_ieee_setapp(dev, app); + if (err) + return err; #ifdef IXGBE_FCOE - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); @@ -656,6 +666,23 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, ixgbe_dcbnl_devreset(dev); } #endif + + /* VF devices should use default UP when available */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0) { + int vf; + + adapter->default_up = app->priority; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + app->priority, vf); + } + } + return 0; } @@ -683,6 +710,24 @@ static int 
ixgbe_dcbnl_ieee_delapp(struct net_device *dev, ixgbe_dcbnl_devreset(dev); } #endif + /* IF default priority is being removed clear VF default UP */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0 && adapter->default_up == app->priority) { + int vf; + long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); + int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; + + adapter->default_up = qos; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + qos, vf); + } + } + return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 8d3a21889099..c5933f6dceee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -24,9 +24,6 @@ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ - -#ifdef CONFIG_DEBUG_FS - #include <linux/debugfs.h> #include <linux/module.h> @@ -37,20 +34,6 @@ static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; /** - * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened - * @inode: inode that was opened - * @filp: file info - * - * Stash the adapter pointer hiding in the inode into the file pointer where - * we can find it later in the read and write calls - **/ -static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - -/** * ixgbe_dbg_reg_ops_read - read for reg_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read @@ -61,23 +44,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char buf[256]; - int bytes_not_copied; + char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - len = snprintf(buf, sizeof(buf), "%s: %s\n", - adapter->netdev->name, ixgbe_dbg_reg_ops_buf); - if (count < len) + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); return -ENOSPC; - bytes_not_copied = copy_to_user(buffer, buf, len); - if (bytes_not_copied < 0) - return bytes_not_copied; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - *ppos = len; + kfree(buf); return len; } @@ -93,7 +80,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int bytes_not_copied; + int len; /* don't allow partial writes */ if (*ppos != 0) @@ -101,14 +88,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) return -ENOSPC; - bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied < count) - 
count -= bytes_not_copied; - else - return -ENOSPC; - ixgbe_dbg_reg_ops_buf[count] = '\0'; + len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, + sizeof(ixgbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_reg_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { u32 reg, value; @@ -142,7 +130,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, static const struct file_operations ixgbe_dbg_reg_ops_fops = { .owner = THIS_MODULE, - .open = ixgbe_dbg_reg_ops_open, + .open = simple_open, .read = ixgbe_dbg_reg_ops_read, .write = ixgbe_dbg_reg_ops_write, }; @@ -150,20 +138,6 @@ static const struct file_operations ixgbe_dbg_reg_ops_fops = { static char ixgbe_dbg_netdev_ops_buf[256] = ""; /** - * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item - * @inode: inode that was opened - * @filp: file info - * - * Stash the adapter pointer hiding in the inode into the file pointer - * where we can find it later in the read and write calls - **/ -static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - -/** * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read @@ -175,23 +149,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char buf[256]; - int bytes_not_copied; + char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - len = snprintf(buf, sizeof(buf), "%s: %s\n", - adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); - if (count < len) + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); return -ENOSPC; - bytes_not_copied = copy_to_user(buffer, buf, len); - if (bytes_not_copied < 0) - return bytes_not_copied; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - *ppos = len; + kfree(buf); return len; } @@ -207,7 +185,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int bytes_not_copied; + int len; /* don't allow partial writes */ if (*ppos != 0) @@ -215,15 +193,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) return -ENOSPC; - bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, - buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied < count) - count -= bytes_not_copied; - else - return -ENOSPC; - ixgbe_dbg_netdev_ops_buf[count] = '\0'; + len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, + sizeof(ixgbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_netdev_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); @@ -238,7 +216,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, static const struct file_operations ixgbe_dbg_netdev_ops_fops = { .owner = THIS_MODULE, - .open = ixgbe_dbg_netdev_ops_open, + .open = simple_open, .read = ixgbe_dbg_netdev_ops_read, .write = ixgbe_dbg_netdev_ops_write, }; @@ -296,5 +274,3 @@ void ixgbe_dbg_exit(void) { debugfs_remove_recursive(ixgbe_dbg_root); } - -#endif /* CONFIG_DEBUG_FS */ diff 
--git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 116f0e901bee..c3f1afd86906 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -39,6 +39,7 @@ #include <linux/uaccess.h> #include "ixgbe.h" +#include "ixgbe_phy.h" #define IXGBE_ALL_RAR_ENTRIES 16 @@ -156,7 +157,7 @@ static int ixgbe_get_settings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; u32 link_speed = 0; - bool autoneg; + bool autoneg = false; bool link_up; hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); @@ -333,10 +334,10 @@ static int ixgbe_set_settings(struct net_device *netdev, return err; /* this sets the link speed and restarts auto-neg */ hw->mac.autotry_restart = true; - err = hw->mac.ops.setup_link(hw, advertised, true, true); + err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); - hw->mac.ops.setup_link(hw, old, true, true); + hw->mac.ops.setup_link(hw, old, true); } } else { /* in this case we currently only support 10Gb/FULL */ @@ -383,6 +384,11 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return -EINVAL; + /* some devices do not support autoneg of link flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + (ixgbe_device_supports_autoneg_fc(hw) != 0)) + return -EINVAL; + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) @@ -887,24 +893,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; + struct ixgbe_ring *temp_ring; int i, err = 0; u32 new_rx_count, new_tx_count; - bool need_update = false; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); - new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD); - new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD); + new_tx_count = clamp_t(u32, ring->tx_pending, + IXGBE_MIN_TXD, IXGBE_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - if ((new_tx_count == adapter->tx_ring[0]->count) && - (new_rx_count == adapter->rx_ring[0]->count)) { + new_rx_count = clamp_t(u32, ring->rx_pending, + IXGBE_MIN_RXD, IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { /* nothing to do */ return 0; } @@ -922,81 +927,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev, goto clear_reset; } - temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); - if (!temp_tx_ring) { + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + 
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); + + if (!temp_ring) { err = -ENOMEM; goto clear_reset; } + ixgbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ if (new_tx_count != adapter->tx_ring_count) { for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_tx_ring[i], adapter->tx_ring[i], + memcpy(&temp_ring[i], adapter->tx_ring[i], sizeof(struct ixgbe_ring)); - temp_tx_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; - ixgbe_free_tx_resources(&temp_tx_ring[i]); + ixgbe_free_tx_resources(&temp_ring[i]); } - goto clear_reset; + goto err_setup; } } - need_update = true; - } - temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); - if (!temp_rx_ring) { - err = -ENOMEM; - goto err_setup; + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; } + /* Repeat the process for the Rx rings if needed */ if (new_rx_count != adapter->rx_ring_count) { for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_rx_ring[i], adapter->rx_ring[i], + memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); - temp_rx_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); + + temp_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(&temp_ring[i]); if (err) { while (i) { i--; - ixgbe_free_rx_resources(&temp_rx_ring[i]); + ixgbe_free_rx_resources(&temp_ring[i]); } goto err_setup; } + } - need_update = true; - } - /* if rings need to be updated, here's the place to do it in one shot */ - if (need_update) { - ixgbe_down(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); - /* tx */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbe_free_tx_resources(adapter->tx_ring[i]); - memcpy(adapter->tx_ring[i], &temp_tx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->tx_ring_count = new_tx_count; + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); } - /* rx */ - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbe_free_rx_resources(adapter->rx_ring[i]); - memcpy(adapter->rx_ring[i], &temp_rx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->rx_ring_count = new_rx_count; - } - ixgbe_up(adapter); + adapter->rx_ring_count = new_rx_count; } - vfree(temp_rx_ring); err_setup: - vfree(temp_tx_ring); + ixgbe_up(adapter); + vfree(temp_ring); clear_reset: clear_bit(__IXGBE_RESETTING, &adapter->state); return err; @@ -1037,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, p = (char *) adapter + ixgbe_gstrings_stats[i].stat_offset; break; + default: + data[i] = 0; + continue; } data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == @@ -1093,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); + for (i = 0; i < IXGBE_TEST_LEN; i++) { + 
memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { @@ -1834,19 +1843,11 @@ static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; bool if_running = netif_running(netdev); set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - e_info(hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { @@ -1867,12 +1868,24 @@ static void ixgbe_diag_test(struct net_device *netdev, } } + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + if (if_running) /* indicate we're in test mode */ dev_close(netdev); - else - ixgbe_reset(adapter); + /* bringing adapter down disables SFP+ optics */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); e_info(hw, "register testing starting\n"); if (ixgbe_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1905,16 +1918,22 @@ static void ixgbe_diag_test(struct net_device *netdev, skip_loopback: ixgbe_reset(adapter); + /* clear testing bit and return adapter to previous state */ clear_bit(__IXGBE_TESTING, &adapter->state); if (if_running) dev_open(netdev); } else { e_info(hw, "online testing starting\n"); + + /* if adapter is down, SFP+ optics will be disabled */ + if (!if_running && hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + /* Online tests */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; - /* Online tests aren't run; pass by default */ + /* Offline tests aren't run; pass by default */ data[0] = 0; data[1] = 0; data[2] = 0; @@ -1922,6 +1941,10 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); } + + /* if adapter was down, ensure SFP+ optics are disabled again */ + if (!if_running && hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); skip_ol_tests: msleep_interruptible(4 * 1000); } @@ -2090,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; - u16 tx_itr_param, rx_itr_param; + u16 tx_itr_param, rx_itr_param, tx_itr_prev; bool need_reset = false; - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) - return -EINVAL; + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) @@ -2122,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev, else tx_itr_param = adapter->tx_itr_setting; + /* mixed Rx/Tx */ + if 
(adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + +#if IS_ENABLED(CONFIG_BQL) + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting > 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev > IXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev > 1) && + (tx_itr_prev < IXGBE_100K_ITR)) + need_reset = true; + } +#endif /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter); + need_reset |= ixgbe_update_rsc(adapter); for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; @@ -2153,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, union ixgbe_atr_input *mask = &adapter->fdir_mask; struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *rule = NULL; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (fsp->location <= rule->sw_idx) break; @@ -2220,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *rule; int cnt = 0; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (cnt == cmd->rule_cnt) return -EMSGSIZE; @@ -2314,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2, *parent; - struct ixgbe_fdir_filter *rule; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule, *parent; int err = -EINVAL; parent = NULL; rule = NULL; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { /* hash found, or no matching entry */ if (rule->sw_idx >= sw_idx) break; - parent = node; + parent = rule; } /* if there is an old rule occupying our place remove it */ @@ -2355,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_after(parent, &input->fdir_node); + hlist_add_after(&parent->fdir_node, &input->fdir_node); else hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); @@ -2669,7 +2713,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { -#ifdef CONFIG_IXGBE_PTP case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -2693,9 +2736,16 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << 
HWTSTAMP_FILTER_PTP_V2_EVENT); break; -#endif /* CONFIG_IXGBE_PTP */ default: return ethtool_op_get_ts_info(dev, info); break; @@ -2703,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev, return 0; } +static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + max_combined = 4; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = IXGBE_MAX_FDIR_INDICES; + } else { + /* support up to 16 queues with RSS */ + max_combined = IXGBE_MAX_RSS_INDICES; + } + + return max_combined; +} + +static void ixgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ixgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int ixgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ixgbe_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit at 16 */ + if (count > IXGBE_MAX_RSS_INDICES) + count = IXGBE_MAX_RSS_INDICES; + adapter->ring_feature[RING_F_RSS].limit = count; + +#ifdef IXGBE_FCOE + /* cap FCoE limit at 8 */ + if (count > IXGBE_FCRETA_SIZE) + count = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; + +#endif + /* use setup TC to update any traffic class queue mapping */ + return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ixgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + int 
ret_val = 0; + bool page_swap = false; + + /* avoid concurent i2c reads */ + while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + msleep(100); + + /* used by the service task */ + set_bit(__IXGBE_READ_I2C, &adapter->state); + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) { + ret_val = -EIO; + goto err_out; + } + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) { + ret_val = -EIO; + goto err_out; + } + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + +err_out: + clear_bit(__IXGBE_READ_I2C, &adapter->state); + return ret_val; +} + +static int ixgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + int ret_val = 0; + + /* ixgbe_get_module_info is called before this function in all + * cases, so we do not need any checks we already do above, + * and can trust ee->len to be a known value. + */ + + while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + msleep(100); + set_bit(__IXGBE_READ_I2C, &adapter->state); + + /* Read the first block, SFF-8079 */ + for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) { + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + if (status != 0) { + /* Error occured while reading module */ + ret_val = -EIO; + goto err_out; + } + data[i] = databyte; + } + + /* If the second block is requested, check if SFF-8472 is supported. 
*/ + if (ee->len == ETH_MODULE_SFF_8472_LEN) { + if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) + return -EOPNOTSUPP; + + /* Read the second block, SFF-8472 */ + for (i = ETH_MODULE_SFF_8079_LEN; + i < ETH_MODULE_SFF_8472_LEN; i++) { + status = hw->phy.ops.read_i2c_sff8472(hw, + i - ETH_MODULE_SFF_8079_LEN, &databyte); + if (status != 0) { + /* Error occured while reading module */ + ret_val = -EIO; + goto err_out; + } + data[i] = databyte; + } + } + +err_out: + clear_bit(__IXGBE_READ_I2C, &adapter->state); + + return ret_val; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -2731,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ae73ef14fdf3..f58db453a97e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -544,15 +544,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; - first->tx_flags |= IXGBE_TX_FLAGS_FSO; + first->tx_flags |= IXGBE_TX_FLAGS_TSO; } /* set flag indicating FCOE to ixgbe_tx_map call */ - first->tx_flags |= IXGBE_TX_FLAGS_FCOE; + first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; - /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_transport_offset(skb) + @@ -717,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) /* Extra buffer to be shared by all DDPs for HW work around */ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (!buffer) { - e_err(drv, "failed to allocate extra DDP buffer\n"); + if (!buffer) return -ENOMEM; - } dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) { @@ -800,6 +797,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev) return -EINVAL; e_info(drv, "Enabling FCoE offload features.\n"); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index bf724da99375..3a02759b5e95 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,7 +1,7 @@ 
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 17ecbcedd548..ef5f7a678ce1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) fcoe = &adapter->ring_feature[RING_F_FCOE]; /* limit ourselves based on feature limits */ - fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (fcoe_i) { @@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (vmdq_i > 1 && fcoe_i) { - /* reserve no more than number of CPUs */ - fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); - /* alloc queues for FCoE separately */ fcoe->indices = fcoe_i; fcoe->offset = vmdq_i * rss_i; @@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) if (rss_i > 1 && adapter->atr_sample_rate) { f = &adapter->ring_feature[RING_F_FDIR]; - f->indices = min_t(u16, num_online_cpus(), f->limit); - rss_i = max_t(u16, rss_i, f->indices); + rss_i = f->indices = f->limit; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, { struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; - int node = -1; + int node = NUMA_NO_NODE; int cpu = -1; int ring_count, size; + u8 tcs = netdev_get_num_tc(adapter->netdev); ring_count = txr_count + rxr_count; size = sizeof(struct ixgbe_q_vector) + (sizeof(struct ixgbe_ring) * ring_count); /* customize cpu for Flow Director mapping */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } } } @@ -802,10 +801,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* setup affinity mask and node */ if (cpu != -1) cpumask_set_cpu(cpu, &q_vector->affinity_mask); - else - cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); q_vector->numa_node = node; +#ifdef CONFIG_IXGBE_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#endif /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); @@ -821,6 +823,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize pointer to rings */ ring = q_vector->ring; + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; 
+ else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + while (txr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fa3d552e1f4a..db5611ae407e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -44,6 +44,7 @@ #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> @@ -62,14 +63,10 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define MAJ 3 -#define MIN 9 -#define BUILD 15 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ - __stringify(BUILD) "-k" +#define DRV_VERSION "3.11.33-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2012 Intel Corporation."; + "Copyright (c) 1999-2013 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -335,11 +332,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), @@ -355,13 +354,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) /* Transmit Descriptor Formats * - * Advanced Transmit Descriptor + * 82598 Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ - * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | * +--------------------------------------------------------------+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + * + * 82598 Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | NXTSEQ | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + * + * 82599+ Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV 
|DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * 82599+ Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 */ for (n = 0; n < adapter->num_tx_queues; n++) { @@ -369,40 +392,43 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("------------------------------------\n"); pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] " - "[PlPOIdStDDt Ln] [bi->dma ] " - "leng ntw timestamp bi->skb\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = IXGBE_TX_DESC(tx_ring, i); tx_buffer = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp, - tx_buffer->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - pr_cont(" NTC/U\n"); - else if (i == tx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == tx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - if (netif_msg_pktdata(adapter) && - tx_buffer->skb) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - tx_buffer->skb->data, + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), - true); + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } } } @@ -422,7 +448,9 @@ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - /* Advanced Receive Descriptor (Read) Format + /* Receive Descriptor Formats + * + * 82598 Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| @@ -431,27 +459,52 @@ rx_ring_summary: * +-----------------------------------------------------+ * * - * Advanced Receive Descriptor (Write-Back) Format + * 82598 Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 16 15 4 3 0 * +------------------------------------------------------+ - * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | - * | Checksum Ident | | | | Type | Type | + * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | + * | Packet | IP | | | | Type | Type | + * | Checksum | Ident | | | | | | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * 
+------------------------------------------------------+ * 63 48 47 32 31 20 19 0 + * + * 82599+ Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82599+ Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 */ + for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; pr_info("------------------------------------\n"); pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] " - "[ HeadBuf DD] [bi->dma ] [bi->skb] " + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", "<-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] " - "[vl er S cks ln] ---------------- [bi->skb] " + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", "<-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { @@ -646,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u32 xoff[8] = {0}; + u8 tc; int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; @@ -659,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) /* update stats for each tc, only valid with PFC enabled */ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + switch (hw->mac.type) { case ixgbe_mac_82598EB: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; default: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } - hwstats->pxoffrxc[i] += xoff[i]; + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; } /* disarm tx queues that have received xoff frames */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - u8 tc = tx_ring->dcb_tc; + tc = tx_ring->dcb_tc; if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } @@ -744,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } } @@ -778,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - rmb(); + read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -791,11 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector 
*q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; -#ifdef CONFIG_IXGBE_PTP - if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) - ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); -#endif - /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); @@ -967,7 +1022,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, * which will cause the DCA tag to be cleared. */ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | - IXGBE_DCA_RXCTRL_DATA_DCA_EN | IXGBE_DCA_RXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); @@ -1244,6 +1298,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, struct vlan_hdr *vlan; /* l3 headers */ struct iphdr *ipv4; + struct ipv6hdr *ipv6; } hdr; __be16 protocol; u8 nexthdr = 0; /* default to not TCP */ @@ -1281,20 +1336,30 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, if (hlen < sizeof(struct iphdr)) return hdr.network - data; + /* record next protocol if header is present */ + if (!hdr.ipv4->frag_off) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == __constant_htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + /* record next protocol */ - nexthdr = hdr.ipv4->protocol; - hdr.network += hlen; + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); #ifdef IXGBE_FCOE } else if (protocol == __constant_htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) return max_len; - hdr.network += FCOE_HEADER_LEN; + hlen = FCOE_HEADER_LEN; #endif } else { return hdr.network - data; } - /* finally sort out TCP */ + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ if (nexthdr == IPPROTO_TCP) { if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) return max_len; @@ -1307,6 +1372,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return hdr.network - data; hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); } /* @@ -1329,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), IXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, @@ -1369,9 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); -#endif + ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -1781,7 +1850,7 @@ dma_sync: **/ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, - int budget) + const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE @@ -1832,7 +1901,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; /* populate checksum, timestamp, VLAN, and protocol */ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); @@ -1865,8 +1933,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ - budget--; - } while (likely(budget)); + 
total_rx_packets++; + } while (likely(total_rx_packets < budget)); u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1878,7 +1946,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (cleaned_count) ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return !!budget; + return (total_rx_packets < budget); } /** @@ -1914,20 +1982,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); - if (q_vector->tx.ring && !q_vector->rx.ring) { - /* tx only vector */ - if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; - else - q_vector->itr = adapter->tx_itr_setting; - } else { - /* rx or rx/tx vector */ - if (adapter->rx_itr_setting == 1) - q_vector->itr = IXGBE_20K_ITR; - else - q_vector->itr = adapter->rx_itr_setting; - } - ixgbe_write_eitr(q_vector); } @@ -2125,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) return; if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { - u32 autoneg; + u32 speed; bool link_up = false; - hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (link_up) return; @@ -2324,10 +2378,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } -#ifdef CONFIG_IXGBE_PTP if (adapter->hw.mac.type == ixgbe_mac_X540) mask |= IXGBE_EIMS_TIMESYNC; -#endif if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) @@ -2393,10 +2445,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); -#ifdef CONFIG_IXGBE_PTP if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter, eicr); -#endif /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2588,10 +2638,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); -#ifdef CONFIG_IXGBE_PTP if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter, eicr); -#endif /* would disable interrupts here but EIAM disabled it */ napi_schedule(&q_vector->napi); @@ -2699,12 +2747,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - /* rx/tx vector */ - if (adapter->rx_itr_setting == 1) - q_vector->itr = IXGBE_20K_ITR; - else - q_vector->itr = adapter->rx_itr_setting; - ixgbe_write_eitr(q_vector); ixgbe_set_ivar(adapter, 0, 0, 0); @@ -2744,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* * set WTHRESH to encourage burst writeback, it should not be set - * higher than 1 when ITR is 0 as it could cause false TX hangs + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled * * In order to avoid issues WTHRESH + PTHRESH should always be equal * to or less than the number of on chip descriptors, which is * currently 40. 
*/ +#if IS_ENABLED(CONFIG_BQL) + if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) +#else if (!ring->q_vector || (ring->q_vector->itr < 8)) +#endif txdctl |= (1 << 16); /* WTHRESH = 1 */ else txdctl |= (8 << 16); /* WTHRESH = 8 */ @@ -2771,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ring->atr_sample_rate = 0; } + /* initialize XPS */ + if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct ixgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); /* enable queue */ @@ -3132,14 +3190,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); - /* If operating in IOV mode set RLPML for X540 */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - hw->mac.type == ixgbe_mac_X540) { - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ((ring->netdev->mtu + ETH_HLEN + - ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); - } - if (hw->mac.type == ixgbe_mac_82598EB) { /* * enable cache line friendly hardware writes: @@ -3211,7 +3261,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); @@ -3234,8 +3285,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - /* enable Tx loopback for VF/PF communication */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Enable MAC Anti-Spoofing */ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), @@ -3263,6 +3312,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif /* IXGBE_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { mhadd &= ~IXGBE_MHADD_MFS_MASK; @@ -3271,9 +3325,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } - /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; @@ -3840,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); @@ -3848,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) if (!hlist_empty(&adapter->fdir_filter_list)) ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); - hlist_for_each_entry_safe(filter, node, node2, + 
hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, @@ -3960,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) **/ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { - u32 autoneg; - bool negotiation, link_up = false; + u32 speed; + bool autoneg, link_up = false; u32 ret = IXGBE_ERR_LINK_SETUP; if (hw->mac.ops.check_link) - ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); if (ret) goto link_cfg_out; - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, - &negotiation); + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + ret = hw->mac.ops.get_link_capabilities(hw, &speed, + &autoneg); if (ret) goto link_cfg_out; if (hw->mac.ops.setup_link) - ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); + ret = hw->mac.ops.setup_link(hw, speed, link_up); link_cfg_out: return ret; } @@ -4072,11 +4123,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) else ixgbe_configure_msi_and_legacy(adapter); - /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.enable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* enable the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); clear_bit(__IXGBE_DOWN, &adapter->state); @@ -4192,6 +4240,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_reset(adapter); } /** @@ -4305,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) { - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); - hlist_for_each_entry_safe(filter, node, node2, + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); @@ -4393,11 +4444,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); ixgbe_clean_all_tx_rings(adapter); @@ -4429,11 +4477,12 @@ static void ixgbe_tx_timeout(struct net_device *netdev) * Fields are initialized based on PCI device information and * OS network device settings (MTU size). 
**/ -static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +static int ixgbe_sw_init(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned int rss; + unsigned int rss, fdir; + u32 fwsm; #ifdef CONFIG_IXGBE_DCB int j; struct tc_configuration *tc; @@ -4447,37 +4496,58 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - /* Set capability flags */ + /* Set common capability flags and settings */ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + adapter->max_q_vectors = MAX_Q_VECTORS_82599; + adapter->atr_sample_rate = 20; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef CONFIG_IXGBE_DCA + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif /* CONFIG_IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + adapter->max_q_vectors = MAX_Q_VECTORS_82598; - break; - case ixgbe_mac_X540: - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - case ixgbe_mac_82599EB: - adapter->max_q_vectors = MAX_Q_VECTORS_82599; - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - /* Flow Director hash filters enabled */ - adapter->atr_sample_rate = 20; - adapter->ring_feature[RING_F_FDIR].limit = - IXGBE_MAX_FDIR_INDICES; - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; + adapter->ring_feature[RING_F_FDIR].limit = 0; + adapter->atr_sample_rate = 0; + adapter->fdir_pballoc = 0; #ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; #ifdef CONFIG_IXGBE_DCB - /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ #endif /* IXGBE_FCOE */ break; + case ixgbe_mac_82599EB: + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + case ixgbe_mac_X540: + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if (fwsm & IXGBE_FWSM_TS_ENABLED) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; default: break; } @@ -4533,7 +4603,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = false; + hw->fc.disable_fc_autoneg = + (ixgbe_device_supports_autoneg_fc(hw) == 0) ? 
false : true; #ifdef CONFIG_PCI_IOV /* assign number of SR-IOV VFs */ @@ -4828,14 +4899,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; /* - * For 82599EB we cannot allow PF to change MTU greater than 1500 - * in SR-IOV mode as it may cause buffer overruns in guest VFs that - * don't allocate and chain buffers correctly. + * For 82599EB we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && - (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) - return -EINVAL; + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -4901,6 +4972,8 @@ static int ixgbe_open(struct net_device *netdev) if (err) goto err_set_queues; + ixgbe_ptp_init(adapter); + ixgbe_up_complete(adapter); return 0; @@ -4932,6 +5005,8 @@ static int ixgbe_close(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + ixgbe_ptp_stop(adapter); + ixgbe_down(adapter); ixgbe_free_irq(adapter); @@ -5022,14 +5097,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) if (wufc) { ixgbe_set_rx_mode(netdev); - /* - * enable the optics for both mult-speed fiber and - * 82599 SFP+ fiber as we can WoL. - */ - if (hw->mac.ops.enable_tx_laser && - (hw->phy.multispeed_fiber || - (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber && - hw->mac.type == ixgbe_mac_82599EB))) + /* enable the optics for 82599 SFP+ fiber as we can WoL */ + if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); /* turn on all-multi mode if wake on multicast is enabled */ @@ -5442,6 +5511,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) adapter->link_speed = link_speed; } +static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .protocol = 0, + }; + u8 up = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) + up = dcb_ieee_getapp_mask(netdev, &app); + + adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#endif +} + /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message @@ -5482,9 +5568,10 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) break; } -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_start_cyclecounter(adapter); -#endif + adapter->last_rx_ptp_check = jiffies; + + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 
@@ -5501,6 +5588,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); + /* update the default user priority for VFs */ + ixgbe_update_default_up(adapter); + /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); } @@ -5526,9 +5616,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_start_cyclecounter(adapter); -#endif + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); @@ -5561,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). */ + e_warn(drv, "initiating reset to clear Tx work after link loss\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; } } @@ -5625,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; + /* concurent i2c reads are not supported */ + if (test_bit(__IXGBE_READ_I2C, &adapter->state)) + return; + /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; @@ -5685,8 +5779,8 @@ sfp_out: static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 autoneg; - bool negotiation; + u32 speed; + bool autoneg = false; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) return; @@ -5697,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); if (hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, autoneg, negotiation, true); + hw->mac.ops.setup_link(hw, speed, true); adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; @@ -5825,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work) struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); - ixgbe_reset_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); @@ -5833,9 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_overflow_check(adapter); -#endif + + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { + ixgbe_ptp_overflow_check(adapter); + ixgbe_ptp_rx_hang(adapter); + } ixgbe_service_event_complete(adapter); } @@ -5848,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -5890,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; - /* mss_l4len_id: use 1 as index for TSO */ + /* 
mss_l4len_id: use 0 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); @@ -5915,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { - if (unlikely(skb->no_fcs)) - first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; - if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; - } + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_CC)) + return; } else { u8 l4_hdr = 0; switch (first->protocol) { @@ -5978,32 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, type_tucmd, mss_l4len_idx); } -static __le32 ixgbe_tx_cmd_type(u32 tx_flags) +#define IXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ - __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT); + u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; /* set HW vlan bit if vlan is present */ - if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); - -#ifdef CONFIG_IXGBE_PTP - if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); -#endif + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, + IXGBE_ADVTXD_DCMD_VLE); /* set segmentation enable bits for TSO/FSO */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, + IXGBE_ADVTXD_DCMD_TSE); + + /* set timestamp bit if present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, + IXGBE_ADVTXD_MAC_TSTAMP); /* insert frame checksum */ - if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) - cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); return cmd_type; } @@ -6011,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) { - __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); + u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; /* enable L4 checksum for TSO and TX checksum offload */ - if (tx_flags & IXGBE_TX_FLAGS_CSUM) - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CSUM, + IXGBE_ADVTXD_POPTS_TXSM); /* enble IPv4 checksum for TSO */ - if (tx_flags & IXGBE_TX_FLAGS_IPV4) - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - - /* use index 1 context for TSO/FSO/FCOE */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TSO) -#endif - olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_IPV4, + IXGBE_ADVTXD_POPTS_IXSM); /* * Check Context must be set if Tx switch is enabled, which it * always is for case where 
virtual functions are running */ -#ifdef IXGBE_FCOE - if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE)) -#else - if (tx_flags & IXGBE_TX_FLAGS_TXSW) -#endif - olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CC, + IXGBE_ADVTXD_CC); - tx_desc->read.olinfo_status = olinfo_status; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ @@ -6050,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) { - dma_addr_t dma; struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int data_len = skb->data_len; - unsigned int size = skb_headlen(skb); - unsigned int paylen = skb->len - hdr_len; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; u32 tx_flags = first->tx_flags; - __le32 cmd_type; + u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBE_TX_DESC(tx_ring, i); - ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); - cmd_type = ixgbe_tx_cmd_type(tx_flags); + ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; #ifdef IXGBE_FCOE if (tx_flags & IXGBE_TX_FLAGS_FCOE) { @@ -6079,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, #endif dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - /* record length, and DMA address */ - dma_unmap_len_set(first, len, size); - dma_unmap_addr_set(first, dma, dma); + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); - tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.buffer_addr = cpu_to_le64(dma); - for (;;) { while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = - cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); + cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); i++; tx_desc++; @@ -6099,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; dma += IXGBE_MAX_DATA_PER_TXD; size -= IXGBE_MAX_DATA_PER_TXD; tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; } if (likely(!data_len)) break; - tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); i++; tx_desc++; @@ -6118,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; #ifdef IXGBE_FCOE size = min_t(unsigned int, data_len, skb_frag_size(frag)); @@ -6128,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; tx_buffer = &tx_ring->tx_buffer_info[i]; - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; - - frag++; } /* write last descriptor with RS and EOP bits */ - cmd_type |= cpu_to_le32(size) | 
cpu_to_le32(IXGBE_TXD_CMD); - tx_desc->read.cmd_type_len = cmd_type; + cmd_type |= size | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); @@ -6304,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } +#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : - smp_processor_id(); -#ifdef IXGBE_FCOE - __be16 protocol = vlan_get_protocol(skb); + struct ixgbe_adapter *adapter; + struct ixgbe_ring_feature *f; + int txq; - if (((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP))) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - struct ixgbe_ring_feature *f; + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case __constant_htons(ETH_P_FCOE): + case __constant_htons(ETH_P_FIP): + adapter = netdev_priv(dev); - f = &adapter->ring_feature[RING_F_FCOE]; + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + break; + default: + return __netdev_pick_tx(dev, skb); + } - while (txq >= f->indices) - txq -= f->indices; - txq += adapter->ring_feature[RING_F_FCOE].offset; + f = &adapter->ring_feature[RING_F_FCOE]; - return txq; - } -#endif + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : + smp_processor_id(); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - while (unlikely(txq >= dev->real_num_tx_queues)) - txq -= dev->real_num_tx_queues; - return txq; - } + while (txq >= f->indices) + txq -= f->indices; - return skb_tx_hash(dev, skb); + return txq + f->offset; } +#endif netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -6393,12 +6475,15 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); -#ifdef CONFIG_IXGBE_PTP if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); } -#endif #ifdef CONFIG_PCI_IOV /* @@ -6406,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, * Tx switch had been disabled. */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - tx_flags |= IXGBE_TX_FLAGS_TXSW; + tx_flags |= IXGBE_TX_FLAGS_CC; #endif /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
*/ @@ -6485,6 +6570,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, if (skb_pad(skb, 17 - skb->len)) return NETDEV_TX_OK; skb->len = 17; + skb_set_tail_pointer(skb, 17); } tx_ring = adapter->tx_ring[skb->queue_mapping]; @@ -6547,10 +6633,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (cmd) { -#ifdef CONFIG_IXGBE_PTP case SIOCSHWTSTAMP: return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); -#endif default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } @@ -6738,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) } } +#endif /* CONFIG_IXGBE_DCB */ /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * @@ -6763,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_close(dev); ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_IXGBE_DCB if (tc) { netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); @@ -6785,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) adapter->dcb_cfg.pfc_mode_enable = false; } - ixgbe_init_interrupt_scheme(adapter); ixgbe_validate_rtr(adapter, tc); + +#endif /* CONFIG_IXGBE_DCB */ + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) - ixgbe_open(dev); + return ixgbe_open(dev); return 0; } -#endif /* CONFIG_IXGBE_DCB */ +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} + +#endif void ixgbe_do_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -6910,13 +7009,16 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; - if (ndm->ndm_state & NUD_PERMANENT) { + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", ixgbe_driver_name); return -EINVAL; } - if (is_unicast_ether_addr(addr)) { + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; if (netdev_uc_count(dev) < rar_uc_entries) @@ -6936,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, +static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr) { @@ -6974,11 +7076,69 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb, return idx; } +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + u32 reg = 0; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + reg = 0; + adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; + } else if (mode == BRIDGE_MODE_VEB) { + reg = IXGBE_PFDTXGSWC_VT_LBEN; + adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + } else + return -EINVAL; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); + + 
e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + } + + return 0; +} + +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, +#ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, +#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -7013,6 +7173,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fdb_add = ixgbe_ndo_fdb_add, .ndo_fdb_del = ixgbe_ndo_fdb_del, .ndo_fdb_dump = ixgbe_ndo_fdb_dump, + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, }; /** @@ -7042,6 +7204,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, break; case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: + case IXGBE_SUBDEV_ID_82599_ECNA_DP: is_wol_supported = 1; break; } @@ -7079,8 +7242,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ -static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; @@ -7088,9 +7250,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; int i, err, pci_using_dac; + unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; - unsigned int indices = num_possible_cpus(); - unsigned int dcb_max = 0; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -7139,25 +7300,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, pci_set_master(pdev); pci_save_state(pdev); + if (ii->mac == ixgbe_mac_82598EB) { #ifdef CONFIG_IXGBE_DCB - if (ii->mac == ixgbe_mac_82598EB) - dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, - IXGBE_MAX_RSS_INDICES); - else - dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, - IXGBE_MAX_FDIR_INDICES); + /* 8 TC w/ 4 queues per TC */ + indices = 4 * MAX_TRAFFIC_CLASS; +#else + indices = IXGBE_MAX_RSS_INDICES; #endif + } - if (ii->mac == ixgbe_mac_82598EB) - indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); - else - indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); - -#ifdef IXGBE_FCOE - indices += min_t(unsigned int, num_possible_cpus(), - IXGBE_MAX_FCOE_INDICES); -#endif - indices = max_t(unsigned int, dcb_max, indices); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; @@ -7260,7 +7411,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } #ifdef CONFIG_PCI_IOV - ixgbe_enable_sriov(adapter, ii); + /* SR-IOV not supported on the 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + goto skip_sriov; + /* Mailbox */ + ixgbe_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + ixgbe_enable_sriov(adapter); + 
pci_sriov_set_totalvfs(pdev, 63); +skip_sriov: #endif netdev->features = NETIF_F_SG | @@ -7304,13 +7463,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + if (hw->mac.ops.get_device_caps) { hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } - adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + + fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC; @@ -7338,9 +7501,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); - if (ixgbe_validate_mac_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; @@ -7364,10 +7526,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_init(adapter); -#endif /* CONFIG_IXGBE_PTP*/ - /* save off EEPROM version number */ hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); @@ -7420,11 +7578,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_register; - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); /* carrier off reporting is important to ethtool even BEFORE open */ @@ -7493,7 +7648,7 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. **/ -static void __devexit ixgbe_remove(struct pci_dev *pdev) +static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; @@ -7505,9 +7660,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_DOWN, &adapter->state); cancel_work_sync(&adapter->service_task); -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_stop(adapter); -#endif #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { @@ -7527,8 +7679,14 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - ixgbe_disable_sriov(adapter); - +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. 
+ */ + if (max_vfs) + ixgbe_disable_sriov(adapter); +#endif ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); @@ -7633,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, if (vfdev) { e_dev_err("Issuing VFLR to VF %d\n", vf); pci_write_config_dword(vfdev, 0xA8, 0x00008000); + /* Free device reference count */ + pci_dev_put(vfdev); } pci_cleanup_aer_uncorrect_error_status(pdev); @@ -7736,12 +7896,13 @@ static struct pci_driver ixgbe_driver = { .name = ixgbe_driver_name, .id_table = ixgbe_pci_tbl, .probe = ixgbe_probe, - .remove = __devexit_p(ixgbe_remove), + .remove = ixgbe_remove, #ifdef CONFIG_PM .suspend = ixgbe_suspend, .resume = ixgbe_resume, #endif .shutdown = ixgbe_shutdown, + .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 1f3e32b576a5..d4a64e665398 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 310bdd961075..e44ff47659b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
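Context note (not part of the patch): the .sriov_configure hook added to ixgbe_driver above is invoked by the PCI core when the PF's sriov_numvfs sysfs attribute is written, replacing the deprecated max_vfs module parameter as the way to request VFs at runtime. The contract below is a sketch inferred from ixgbe_pci_sriov_configure further down in this diff, not an authoritative statement of the PCI core API.

/*
 * Illustrative callback contract (assumption based on the implementation
 * shown later in ixgbe_sriov.c):
 *
 *   num_vfs > 0  -> enable that many VFs, return the number actually enabled
 *   num_vfs == 0 -> disable all VFs, return 0 on success
 *   on failure   -> return a negative errno
 */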
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -62,12 +62,39 @@ /* bits 23:16 are used for exra info for certain messages */ #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +/* definitions to support mailbox API version negotiation */ + +/* + * Each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ #define IXGBE_VF_RESET 0x01 /* VF requests reset */ #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 71659edf81aa..060d2ad2ac96 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
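Illustrative sketch (not part of the patch): one way a VF driver could unpack the IXGBE_VF_GET_QUEUES reply using the return-data indices defined above. The msgbuf layout assumed here (word 0 carries the message code and ACK/NACK, words 1 through 4 the data) and the struct and function names are hypothetical, introduced only to show how the indices are meant to be used.

struct example_vf_queue_info {
	u32 num_tx_queues;	/* IXGBE_VF_TX_QUEUES */
	u32 num_rx_queues;	/* IXGBE_VF_RX_QUEUES */
	u32 port_vlan;		/* IXGBE_VF_TRANS_VLAN, non-zero if the PF enforces a port VLAN */
	u32 default_queue;	/* IXGBE_VF_DEF_QUEUE */
};

static void example_parse_get_queues(const u32 *msgbuf,
				     struct example_vf_queue_info *info)
{
	/* words 1..4 of the mailbox reply hold the queue configuration */
	info->num_tx_queues = msgbuf[IXGBE_VF_TX_QUEUES];
	info->num_rx_queues = msgbuf[IXGBE_VF_RX_QUEUES];
	info->port_vlan     = msgbuf[IXGBE_VF_TRANS_VLAN];
	info->default_queue = msgbuf[IXGBE_VF_DEF_QUEUE];
}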
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -494,11 +494,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled **/ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete) { @@ -854,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, - &identifier); + &identifier); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; /* LAN ID is needed for sfp_type determination */ @@ -872,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; /* ID Module @@ -986,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]); - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status != 0) goto err_read_i2c_eeprom; vendor_oui = @@ -1206,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write @@ -1293,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, break; fail: + ixgbe_i2c_bus_clear(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(100); - ixgbe_i2c_bus_clear(hw); retry++; 
if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index cc18165b4c05..886a3431cf5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -30,6 +30,7 @@ #include "ixgbe_type.h" #define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 /* EEPROM byte offsets */ #define IXGBE_SFF_IDENTIFIER 0x0 @@ -41,6 +42,8 @@ #define IXGBE_SFF_10GBE_COMP_CODES 0x3 #define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 #define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -51,6 +54,7 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 @@ -88,6 +92,9 @@ #define IXGBE_TN_LASI_STATUS_REG 0x9005 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 +/* SFP+ SFF-8472 Compliance code */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); @@ -98,7 +105,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, @@ -126,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index d9291316ee9f..331987d6815c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
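Illustrative sketch (not part of the patch): how a caller might combine the new ixgbe_read_i2c_sff8472_generic() helper with the SFF-8472 defines added above to check whether a plugged SFP module exposes diagnostic data at I2C address 0xA2. Only the helper and the constants come from this diff; the function name and control flow below are hypothetical.

static bool example_sfp_has_sff8472(struct ixgbe_hw *hw)
{
	u8 comp = 0, swap = 0;

	/* a compliance byte of 0 means no SFF-8472 data is implemented */
	if (ixgbe_read_i2c_sff8472_generic(hw, IXGBE_SFF_SFF_8472_COMP, &comp))
		return false;
	if (comp == IXGBE_SFF_SFF_8472_UNSUP)
		return false;

	/* modules that require the address-change sequence are not handled here */
	if (ixgbe_read_i2c_sff8472_generic(hw, IXGBE_SFF_SFF_8472_SWAP, &swap))
		return false;
	if (swap & IXGBE_SFF_ADDRESSING_MODE)
		return false;

	return true;
}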
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -96,15 +96,12 @@ #define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL #define IXGBE_OVERFLOW_PERIOD (HZ * 30) +#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) #ifndef NSECS_PER_SEC #define NSECS_PER_SEC 1000000000ULL #endif -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - /** * ixgbe_ptp_setup_sdp * @hw: the hardware private structure @@ -387,6 +384,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; + event.type = PTP_CLOCK_PPS; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + switch (hw->mac.type) { case ixgbe_mac_X540: ptp_clock_event(adapter->ptp_clock, &event); @@ -396,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) } } - /** - * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow - * @work: structure containing information about this work task + * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct * - * this work function is scheduled to continue reading the timecounter + * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap - * around. This needs to be run approximately twice a minute when no - * PTP activity is occurring. + * around. This needs to be run approximately twice a minute. */ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { - unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies; + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + IXGBE_OVERFLOW_PERIOD); struct timespec ts; - if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) && - (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) { + if (timeout) { ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); adapter->last_overflow_check = jiffies; } } /** - * ixgbe_ptp_match - determine if this skb matches a ptp packet - * @skb: pointer to the skb - * @hwtstamp: pointer to the hwtstamp_config to check - * - * Determine whether the skb should have been timestamped, assuming the - * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet - * should have a timestamp waiting in the registers, and 0 otherwise. + * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure * - * V1 packets have to check the version type to determine whether they are - * correct. However, we can't directly access the data because it might be - * fragmented in the SKB, in paged memory. In order to work around this, we - * use skb_copy_bits which will properly copy the data whether it is in the - * paged memory fragments or not. We have to copy the IP header as well as the - * message type. + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
*/ -static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter) +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { - struct iphdr iph; - u8 msgtype; - unsigned int type, offset; - - if (rx_filter == HWTSTAMP_FILTER_NONE) - return 0; - - type = sk_run_filter(skb, ptp_filter); - - if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)) - return type & PTP_CLASS_V2; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring *rx_ring; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + unsigned long rx_event; + int n; - /* For the remaining cases actually check message type */ - switch (type) { - case PTP_CLASS_V1_IPV4: - skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph)); - offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL; - break; - case PTP_CLASS_V1_IPV6: - offset = OFF_PTP6 + OFF_PTP_CONTROL; - break; - default: - /* other cases invalid or handled above */ - return 0; + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; } - /* Make sure our buffer is long enough */ - if (skb->len < offset) - return 0; + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } - skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype)); + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5*HZ)) { + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; - switch (rx_filter) { - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG); - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG); - break; - default: - return 0; + e_warn(drv, "clearing RX Timestamp hang"); } } /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: structure containing interrupt and ring information - * @skb: particular skb to send timestamp with + * @adapter: the private adapter struct * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval = 0, ns; - u32 tsynctxctl; unsigned long flags; - /* we cannot process timestamps on a ring without a q_vector */ - if (!q_vector || !q_vector->adapter) - return; - - adapter = q_vector->adapter; - hw = &adapter->hw; - - tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; - /* - * if TX timestamp is not valid, exit after clearing the - * timestamp registers - */ - if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID)) - return; - spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->tc, regval); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(skb, &shhwtstamps); + 
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; +} + +/** + * ixgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. + */ +static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, + ptp_tx_work); + struct ixgbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb */ + if (!adapter->ptp_tx_skb) + return; + + if (timeout) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + e_warn(drv, "clearing Tx Timestamp hang"); + return; + } + + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + ixgbe_ptp_tx_hwtstamp(adapter); + else + /* reschedule to keep checking if it's not available yet */ + schedule_work(&adapter->ptp_tx_work); } /** - * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @q_vector: structure containing interrupt and ring information - * @rx_desc: the rx descriptor * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; @@ -554,35 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, adapter = q_vector->adapter; hw = &adapter->hw; + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - - /* Check if we have a valid timestamp and make sure the skb should - * have been timestamped */ - if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || - !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; - /* - * Always read the registers, in order to clear a possible fault - * because of stagnant RX timestamp values for a packet that never - * reached the queue. - */ regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - /* - * If the timestamp bit is set in the packet's descriptor, we know the - * timestamp belongs to this packet. No other packet can be - * timestamped until the registers for timestamping have been read. - * Therefor only one packet with this bit can be in the queue at a - * time, and the rx timestamp values that were in the registers belong - * to this packet. - * - * If nothing went wrong, then it should have a skb_shared_tx that we - * can turn into a skb_shared_hwtstamps. 
- */ - if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - return; spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->tc, regval); @@ -622,8 +606,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct hwtstamp_config config; u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; - u32 tsync_rx_mtrl = 0; - bool is_l4 = false; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; bool is_l2 = false; u32 regval; @@ -646,16 +629,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; - is_l4 = true; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - is_l4 = true; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -668,7 +650,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; - is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: @@ -690,45 +671,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, return 0; } - /* Store filter value for later use */ - adapter->rx_hwtstamp_filter = config.rx_filter; - - /* define ethertype filter for timestamped packets */ + /* define ethertype filter for timestamping L2 packets */ if (is_l2) - IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), (IXGBE_ETQF_FILTER_EN | /* enable filter */ IXGBE_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0); - -#define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */ - | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */ - | IXGBE_FTQF_QUEUE_ENABLE); - - ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */ - & IXGBE_FTQF_DEST_PORT_MASK /* dest check */ - & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */ - << IXGBE_FTQF_5TUPLE_MASK_SHIFT); - - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3), - (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 | - IXGBE_IMIR_SIZE_BP_82599)); + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); - /* enable port check */ - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3), - (htons(PTP_PORT) | - htons(PTP_PORT) << 16)); - - IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf); - - tsync_rx_mtrl |= PTP_PORT << 16; - } else { - IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0); - } /* enable/disable TX */ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); @@ -759,58 +710,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw * @adapter: pointer to the adapter structure * - * this function initializes the timecounter and cyclecounter - * structures for use in generated a ns counter from the arbitrary - * fixed point cycles registers in the hardware. - * - * A change in link speed impacts the frequency of the DMA clock on - * the device, which is used to generate the cycle counter - * registers. 
Therefor this function is called whenever the link speed - * changes. - * - * This function also turns on the SDP pin for clock out feature (X540 - * only), because this is where the shift is first calculated. + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. */ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 incval = 0; - u32 timinca = 0; u32 shift = 0; - u32 cycle_speed; unsigned long flags; /** - * Determine what speed we need to set the cyclecounter - * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat - * unknown speeds as 10Gb. (Hence why we can't just copy the - * link_speed. - */ - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: - case IXGBE_LINK_SPEED_1GB_FULL: - case IXGBE_LINK_SPEED_10GB_FULL: - cycle_speed = adapter->link_speed; - break; - default: - /* cycle speed should be 10Gb when there is no link */ - cycle_speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - } - - /* - * grab the current TIMINCA value from the register so that it can be - * double checked. If the register value has been cleared, it must be - * reset to the correct value for generating a cyclecounter. If - * TIMINCA is zero, the SYSTIME registers do not increment at all. - */ - timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA); - - /* Bail if the cycle speed didn't change and TIMINCA is non-zero */ - if (adapter->cycle_speed == cycle_speed && timinca) - return; - - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added * or subtracted. The drawbacks of a large factor include @@ -819,8 +732,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) * to nanoseconds using only a multiplier and a right-shift, * and (c) the value must fit within the timinca register space * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. 
Set the registers correctly even when link is + * down to preserve the clock setting */ - switch (cycle_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: incval = IXGBE_INCVAL_100; shift = IXGBE_INCVAL_SHIFT_100; @@ -830,6 +747,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) shift = IXGBE_INCVAL_SHIFT_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: + default: incval = IXGBE_INCVAL_10GB; shift = IXGBE_INCVAL_SHIFT_10GB; break; @@ -857,18 +775,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) return; } - /* reset the system time registers */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); - IXGBE_WRITE_FLUSH(hw); - - /* store the new cycle speed */ - adapter->cycle_speed = cycle_speed; - + /* update the base incval used to calculate frequency adjustment */ ACCESS_ONCE(adapter->base_incval) = incval; smp_mb(); - /* grab the ptp lock */ + /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); memset(&adapter->cc, 0, sizeof(adapter->cc)); @@ -877,6 +788,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) adapter->cc.shift = shift; adapter->cc.mult = 1; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ixgbe_ptp_reset + * @adapter: the ixgbe private board structure + * + * When the MAC resets, all timesync features are reset. This function should be + * called to re-enable the PTP clock structure. It will re-init the timecounter + * structure based on the kernel time as well as setup the cycle counter data. + */ +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* set SYSTIME registers to 0 just in case */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); + IXGBE_WRITE_FLUSH(hw); + + ixgbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + /* reset the ns time counter */ timecounter_init(&adapter->tc, &adapter->cc, ktime_to_ns(ktime_get_real())); @@ -904,7 +840,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X540: - snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -918,7 +854,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) adapter->ptp_caps.enable = ixgbe_ptp_enable; break; case ixgbe_mac_82599EB: - snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -936,16 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) return; } - /* initialize the ptp filter */ - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) - e_dev_warn("ptp_filter_init failed\n"); - spin_lock_init(&adapter->tmreg_lock); - - ixgbe_ptp_start_cyclecounter(adapter); - - /* (Re)start the overflow check */ - adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; + INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); @@ -955,6 +883,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) } else e_dev_info("registered PHC device on %s\n", 
netdev->name); + ixgbe_ptp_reset(adapter); + + /* set the flag that PTP has been enabled */ + adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; + return; } @@ -967,11 +900,17 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { /* stop the overflow check task */ - adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED | + adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | IXGBE_FLAG2_PTP_PPS_ENABLED); ixgbe_ptp_setup_sdp(adapter); + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index dce48bf64d96..d44b4d21268c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -44,50 +44,11 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int num_vf_macvlans, i; struct vf_macvlans *mv_list; - int pre_existing_vfs = 0; - - pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->num_vfs) - return; - - /* If there are pre-existing VFs then we have to force - * use of that many because they were not deleted the last - * time someone removed the PF driver. That would have - * been because they were allocated to guest VMs and can't - * be removed. Go ahead and just re-enable the old amount. - * If the user wants to change the number of VFs they can - * use ethtool while making sure no VFs are allocated to - * guest VMs... i.e. the right way. - */ - if (pre_existing_vfs) { - adapter->num_vfs = pre_existing_vfs; - dev_warn(&adapter->pdev->dev, "Virtual Functions already " - "enabled for this device - Please reload all " - "VF drivers to avoid spoofed packet errors\n"); - } else { - int err; - /* - * The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function. If the user requests greater thn - * 63 VFs then it is an error - reset to default of zero. - */ - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); - - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - adapter->num_vfs = 0; - return; - } - } adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); @@ -117,6 +78,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, } } + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + /* If call to enable VFs succeeded then allocate memory * for per VF control structures. 
*/ @@ -124,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, kcalloc(adapter->num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL); if (adapter->vfinfo) { - /* Now that we're sure SR-IOV is enabled - * and memory allocated set up the mailbox parameters - */ - ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); - /* limit trafffic classes based on VFs enabled */ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (adapter->num_vfs < 16)) { @@ -150,23 +109,65 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); -#ifdef IXGBE_FCOE - /* - * When SR-IOV is enabled 82599 cannot support jumbo frames - * so we must disable FCoE because we cannot support FCoE MTU. - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | - IXGBE_FLAG_FCOE_CAPABLE); -#endif - /* enable spoof checking for all VFs */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].spoofchk_enabled = true; + return 0; + } + + return -ENOMEM; +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) return; + + if (!pre_existing_vfs) + dev_warn(&adapter->pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } } - /* Oh oh */ + if (!__ixgbe_enable_sriov(adapter)) + return; + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. 
+ */ e_err(probe, "Unable to allocate memory for VF Data Storage - " "SRIOV disabled\n"); ixgbe_disable_sriov(adapter); @@ -206,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) } #endif /* #ifdef CONFIG_PCI_IOV */ -void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; + int rss; /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; @@ -225,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* if SR-IOV is already disabled then there is nothing to do */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; + return 0; #ifdef CONFIG_PCI_IOV /* @@ -235,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) */ if (ixgbe_vfs_are_assigned(adapter)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - return; + return -EPERM; } /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); @@ -258,15 +260,102 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->ring_feature[RING_F_VMDQ].offset = 0; + rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_RSS].limit = rss; + /* take a breather then clean up driver data */ msleep(100); adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + return 0; +} + +static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ixgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
+ */ + if (num_vfs > 63) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __ixgbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + ixgbe_sriov_reinit(adapter); + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +static int ixgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = ixgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) { + /* ixgbe_disable_sriov() doesn't clear VMDQ flag */ + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; +#ifdef CONFIG_PCI_IOV + ixgbe_sriov_reinit(adapter); +#endif + } + + return err; +} + +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return ixgbe_pci_sriov_disable(dev); + else + return ixgbe_pci_sriov_enable(dev, num_vfs); } static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, - int entries, u16 *hash_list, u32 vf) + u32 *msgbuf, u32 vf) { + int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; struct ixgbe_hw *hw = &adapter->hw; int i; @@ -353,31 +442,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } -static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) +static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - int new_mtu = msgbuf[1]; + int max_frame = msgbuf[1]; u32 max_frs; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - /* Only X540 supports jumbo frames in IOV mode */ - if (adapter->hw.mac.type != ixgbe_mac_X540) - return; + /* + * For 82599EB we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. 
In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_11: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + } /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { - e_err(drv, "VF mtu %d out of range\n", new_mtu); - return; + if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return -EINVAL; } - max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & - IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; - if (max_frs < new_mtu) { - max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; + /* pull current max frame size from hardware */ + max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + max_frs &= IXGBE_MHADD_MFS_MASK; + max_frs >>= IXGBE_MHADD_MFS_SHIFT; + + if (max_frs < max_frame) { + max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); } - e_info(hw, "VF requests change max MTU to %d\n", new_mtu); + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; } static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) @@ -392,35 +539,38 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } -static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) +static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - if (vid) - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), - (vid | IXGBE_VMVIR_VLANA_DEFAULT)); - else - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); } - static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; int rar_entry = hw->mac.num_rar_entries - (vf + 1); + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ - if (adapter->vfinfo[vf].pf_vlan) { - ixgbe_set_vf_vlan(adapter, true, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_set_vmvir(adapter, - (adapter->vfinfo[vf].pf_vlan | - 
(adapter->vfinfo[vf].pf_qos << - VLAN_PRIO_SHIFT)), vf); - ixgbe_set_vmolr(hw, vf, false); + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); } else { - ixgbe_set_vf_vlan(adapter, true, 0, vf); - ixgbe_set_vmvir(adapter, 0, vf); - ixgbe_set_vmolr(hw, vf, true); + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); } /* reset multicast table array for vf */ @@ -430,6 +580,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ixgbe_set_rx_mode(adapter->netdev); hw->mac.ops.clear_rar(hw, rar_entry); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; } static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, @@ -521,30 +674,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) return 0; } -static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 reg; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, msgbuf[4]; u32 reg_offset, vf_shift; + u8 *addr = (u8 *)(&msgbuf[1]); + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ixgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + ixgbe_set_vf_mac(adapter, vf, vf_mac); vf_shift = vf % 32; reg_offset = vf / 32; - /* enable transmit and receive for vf */ + /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= (reg | (1 << vf_shift)); + reg |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= (reg | (1 << vf_shift)); + reg |= 1 << vf_shift; + /* + * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
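[Editor's note] Two details of the hunks above are worth restating in isolation: the per-VF transmit/receive enables are single bits packed 32 VFs per VFRE/VFTE register, and on 82599 the receive bit is the lever that keeps legacy (API 1.0) VFs from ever seeing an oversize frame. The sketch below mirrors that decision under the usual Ethernet size constants; the helper name is illustrative.

/* Sketch: the 82599 "legacy VFs may not mix with jumbo frames" rule from
 * ixgbe_set_vf_lpe()/ixgbe_vf_reset_msg(), plus the VFRE/VFTE bit addressing.
 */
#include <stdbool.h>
#include <stdio.h>

#define ETH_FRAME_LEN 1514      /* standard frame, no FCS */
#define ETH_FCS_LEN   4

enum vf_api { API_1_0, API_1_1 };

/* May this VF keep its Rx path enabled on 82599? */
static bool vf_rx_allowed(enum vf_api api, int pf_max_frame, int vf_max_frame)
{
    /* API 1.1 VFs understand jumbo frames whenever the PF itself runs jumbo */
    if (api == API_1_1 && pf_max_frame > ETH_FRAME_LEN)
        return true;

    /* legacy case: neither side may exceed a standard frame */
    return pf_max_frame <= ETH_FRAME_LEN &&
           vf_max_frame <= ETH_FRAME_LEN + ETH_FCS_LEN;
}

int main(void)
{
    int vf = 37;

    /* which VFRE/VFTE register and bit carry this VF's enable */
    printf("VF %d -> register index %d, bit %d\n", vf, vf / 32, vf % 32);

    printf("legacy VF, PF jumbo:  %s\n",
           vf_rx_allowed(API_1_0, 9000, 1518) ? "rx on" : "rx off");
    printf("api 1.1 VF, PF jumbo: %s\n",
           vf_rx_allowed(API_1_1, 9000, 9018) ? "rx on" : "rx off");
    return 0;
}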
+ * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); - ixgbe_vf_reset_event(adapter, vf); + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + + return err; +} + +static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
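[Editor's note] The consolidated reset handler above replies with a fixed four-word message: the RESET opcode ORed with the ACK bit, the VF's permanent MAC spread over the next two words, and the PF's multicast filter type in the last word so the VF can hash addresses the same way. A sketch of that layout; the opcode and ACK bit values are assumptions standing in for IXGBE_VF_RESET / IXGBE_VT_MSGTYPE_ACK, and the length macro mirrors IXGBE_VF_PERMADDR_MSG_LEN.

/* Sketch: building the permanent-address reply that ixgbe_vf_reset_msg() sends. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VF_RESET         0x01          /* assumed opcode value */
#define VT_MSGTYPE_ACK   0x80000000u   /* assumed ACK bit */
#define PERMADDR_MSG_LEN 4             /* words written to the mailbox */

static void build_reset_reply(uint32_t msgbuf[PERMADDR_MSG_LEN],
                              const uint8_t mac[6], uint32_t mc_filter_type)
{
    memset(msgbuf, 0, PERMADDR_MSG_LEN * sizeof(*msgbuf));
    msgbuf[0] = VF_RESET | VT_MSGTYPE_ACK;  /* "reset done, here is your MAC" */
    memcpy(&msgbuf[1], mac, 6);             /* bytes of words 1 and 2 */
    msgbuf[3] = mc_filter_type;             /* VF hashes MC addresses like the PF */
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };
    uint32_t msg[PERMADDR_MSG_LEN];
    int i;

    build_reset_reply(msg, mac, 0);
    for (i = 0; i < PERMADDR_MSG_LEN; i++)
        printf("msg[%d] = 0x%08x\n", i, msg[i]);
    return 0;
}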
+ */ + if (adapter->vfinfo[vf].spoofchk_enabled) + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + } + + err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for it\n", + vf); + + return err < 0; +} + +static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* if TCs > 1 determine which TC belongs to default user priority */ + if (num_tcs > 1) + default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[IXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[IXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; } static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) @@ -553,10 +897,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; struct ixgbe_hw *hw = &adapter->hw; s32 retval; - int entries; - u16 *hash_list; - int add, vid, index; - u8 *new_mac; retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); @@ -572,39 +912,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) /* flush the ack before we write any messages back */ IXGBE_WRITE_FLUSH(hw); + if (msgbuf[0] == IXGBE_VF_RESET) + return ixgbe_vf_reset_msg(adapter, vf); + /* * until the vf completes a virtual function reset it should not be * allowed to start any configuration. 
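[Editor's note] The API negotiation added above is deliberately minimal: the VF proposes a version in word 1, the PF records it only if it is one the PF speaks (1.0 or 1.1 here) and rejects anything else, and the stored version later gates features such as jumbo frames and the GET_QUEUES query. A sketch of the PF side of that handshake; the enum values and struct are illustrative rather than the driver's ixgbe_pfvf_api_rev definitions.

/* Sketch: PF-side mailbox API negotiation in the spirit of
 * ixgbe_negotiate_vf_api() -- accept known versions, remember per VF,
 * reject the rest so the VF falls back to the lowest common API.
 */
#include <stdio.h>

enum mbox_api { MBOX_API_10 = 0, MBOX_API_11 = 1 };

struct vf_info {
    int vf_api;   /* reset back to 1.0 on every VF reset */
};

static int negotiate_api(struct vf_info *vf, int requested)
{
    switch (requested) {
    case MBOX_API_10:
    case MBOX_API_11:
        vf->vf_api = requested;   /* remembered for later messages */
        return 0;
    default:
        return -1;                /* caller turns this into a NACK */
    }
}

int main(void)
{
    struct vf_info vf = { .vf_api = MBOX_API_10 };

    printf("request 1.1: %d (api now %d)\n",
           negotiate_api(&vf, MBOX_API_11), vf.vf_api);
    printf("request 9:   %d (api still %d)\n",
           negotiate_api(&vf, 9), vf.vf_api);
    return 0;
}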
*/ - - if (msgbuf[0] == IXGBE_VF_RESET) { - unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - new_mac = (u8 *)(&msgbuf[1]); - e_info(probe, "VF Reset msg received from vf %d\n", vf); - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_vf_reset_msg(adapter, vf); - adapter->vfinfo[vf].clear_to_send = true; - - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) - ixgbe_set_vf_mac(adapter, vf, vf_mac); - else - ixgbe_set_vf_mac(adapter, - vf, adapter->vfinfo[vf].vf_mac_addresses); - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(new_mac, vf_mac, ETH_ALEN); - /* - * Piggyback the multicast filter type so VF can compute the - * correct vectors - */ - msgbuf[3] = hw->mac.mc_filter_type; - ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); - - return retval; - } - if (!adapter->vfinfo[vf].clear_to_send) { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ixgbe_write_mbx(hw, msgbuf, 1, vf); @@ -613,70 +927,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) switch ((msgbuf[0] & 0xFFFF)) { case IXGBE_VF_SET_MAC_ADDR: - new_mac = ((u8 *)(&msgbuf[1])); - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) { - ixgbe_set_vf_mac(adapter, vf, new_mac); - } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, - new_mac, ETH_ALEN)) { - e_warn(drv, "VF %d attempted to override " - "administratively set MAC address\nReload " - "the VF driver to resume operations\n", vf); - retval = -1; - } + retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); break; case IXGBE_VF_SET_MULTICAST: - entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - hash_list = (u16 *)&msgbuf[1]; - retval = ixgbe_set_vf_multicasts(adapter, entries, - hash_list, vf); - break; - case IXGBE_VF_SET_LPE: - ixgbe_set_vf_lpe(adapter, msgbuf); + retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); break; case IXGBE_VF_SET_VLAN: - add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - if (adapter->vfinfo[vf].pf_vlan) { - e_warn(drv, "VF %d attempted to override " - "administratively set VLAN configuration\n" - "Reload the VF driver to resume operations\n", - vf); - retval = -1; - } else { - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (!retval && adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - } + retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_LPE: + retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); break; case IXGBE_VF_SET_MACVLAN: - index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> - IXGBE_VT_MSGINFO_SHIFT; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { - e_warn(drv, "VF %d requested MACVLAN filter but is " - "administratively denied\n", vf); - retval = -1; - break; - } - /* - * If the VF is allowed to set MAC filters then turn off - * anti-spoofing to avoid false positives. An index - * greater than 0 will indicate the VF is setting a - * macvlan MAC filter. 
- */ - if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); - retval = ixgbe_set_vf_macvlan(adapter, vf, index, - (unsigned char *)(&msgbuf[1])); - if (retval == -ENOSPC) - e_warn(drv, "VF %d has requested a MACVLAN filter " - "but there is no space for it\n", vf); + retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); @@ -692,7 +961,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, msgbuf, 1, vf); + ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); return retval; } @@ -783,7 +1052,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; - ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); @@ -803,7 +1072,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) } else { err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_set_vmvir(adapter, vlan, vf); + ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); if (adapter->vfinfo[vf].vlan_count) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 1be1d30e4e78..4713f9fc7f46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
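[Editor's note] With each message type factored into its own handler, the receive path above reduces to: read the mailbox, short-circuit a RESET, require clear_to_send for anything else, dispatch on the low 16 bits of word 0, then OR in ACK or NACK plus CTS and write the full buffer back. The sketch below captures only that shape; the opcode values, handler names, and status bits are stand-ins for the driver's IXGBE_VF_* and IXGBE_VT_MSGTYPE_* definitions.

/* Sketch: the reshaped PF mailbox dispatcher of ixgbe_rcv_msg_from_vf(). */
#include <stdint.h>
#include <stdio.h>

#define MSGTYPE_ACK  0x80000000u   /* assumed bit values */
#define MSGTYPE_NACK 0x40000000u
#define MSGTYPE_CTS  0x20000000u

#define VF_SET_MAC_ADDR  0x02      /* assumed opcode values */
#define VF_SET_MULTICAST 0x03
#define VF_API_NEGOTIATE 0x08

static int handle_set_mac(uint32_t *msg)   { (void)msg; return 0; }
static int handle_set_mc(uint32_t *msg)    { (void)msg; return 0; }
static int handle_negotiate(uint32_t *msg) { (void)msg; return -1; }

static void dispatch(uint32_t *msgbuf)
{
    int retval;

    switch (msgbuf[0] & 0xFFFF) {
    case VF_SET_MAC_ADDR:
        retval = handle_set_mac(msgbuf);
        break;
    case VF_SET_MULTICAST:
        retval = handle_set_mc(msgbuf);
        break;
    case VF_API_NEGOTIATE:
        retval = handle_negotiate(msgbuf);
        break;
    default:
        retval = -1;   /* unhandled message */
        break;
    }

    /* tell the VF how it went, and that the PF is ready for more */
    msgbuf[0] |= retval ? MSGTYPE_NACK : MSGTYPE_ACK;
    msgbuf[0] |= MSGTYPE_CTS;
}

int main(void)
{
    uint32_t good[4] = { VF_SET_MAC_ADDR };
    uint32_t bad[4]  = { VF_API_NEGOTIATE };

    dispatch(good);
    dispatch(bad);
    printf("good reply word 0: 0x%08x\n", good[0]);   /* ACK | CTS | opcode */
    printf("bad reply word 0:  0x%08x\n", bad[0]);    /* NACK | CTS | opcode */
    return 0;
}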
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -41,12 +41,20 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); -void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii); +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); #endif +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 16ddf14e8ba4..d118def16f35 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 0722f3368092..6652e96c352d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -56,6 +56,7 @@ #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -1833,15 +1834,6 @@ enum { /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 -/* Check whether address is multicast. This is little-endian specific check.*/ -#define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. 
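[Editor's note] The VMVIR helper moved into the header above as a small inline: the register value is just the VLAN id, the 802.1p priority shifted into the PCP field, and the "always insert default VLAN tag" flag. A one-function sketch of that composition; VLAN_PRIO_SHIFT matches <linux/if_vlan.h>, while the VLANA flag value is an assumption standing in for IXGBE_VMVIR_VLANA_DEFAULT.

/* Sketch: the VMVIR value composed by the new inline ixgbe_set_vmvir(). */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT     13            /* PCP lives in bits 15:13 of the TCI */
#define VMVIR_VLANA_DEFAULT 0x40000000u   /* assumed flag value */

static uint32_t vmvir_value(uint16_t vid, uint8_t qos)
{
    return (uint32_t)vid | ((uint32_t)qos << VLAN_PRIO_SHIFT) |
           VMVIR_VLANA_DEFAULT;
}

int main(void)
{
    /* PF-administered VLAN 100, priority 5, inserted on every VF transmit */
    printf("VMVIR = 0x%08x\n", vmvir_value(100, 5));
    return 0;
}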
*/ -#define IXGBE_IS_BROADCAST(Address) \ - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ - (((u8 *)(Address))[1] == ((u8)0xff))) - /* RAH */ #define IXGBE_RAH_VIND_MASK 0x003C0000 #define IXGBE_RAH_VIND_SHIFT 18 @@ -1962,6 +1954,8 @@ enum { #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 #define IXGBE_MRQC_L3L4TXSWEN 0x00008000 +#define IXGBE_FWSM_TS_ENABLED 0x1 + /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 #define IXGBE_QDE_IDX_MASK 0x00007F00 @@ -2828,7 +2822,7 @@ struct ixgbe_mac_operations { void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); @@ -2875,12 +2869,12 @@ struct ixgbe_phy_operations { s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); - s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, - bool); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); s32 (*check_overtemp)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index de4da5219b71..66c5e946284e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2013 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -72,14 +72,13 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed **/ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - return hw->phy.ops.setup_link_speed(hw, speed, autoneg, + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } @@ -152,7 +151,7 @@ mac_reset_top: hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + if (is_valid_ether_addr(hw->mac.san_addr)) { hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, hw->mac.san_addr, 0, IXGBE_RAH_AV); @@ -879,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, |
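[Editor's note] Replacing ixgbe_validate_mac_addr() with is_valid_ether_addr() for the SAN MAC, together with dropping the driver-local IXGBE_IS_MULTICAST/IXGBE_IS_BROADCAST macros from ixgbe_type.h, means the address is only installed in a RAR entry if it is a non-zero unicast address. A standalone sketch of that test; the helper names below are illustrative, not the kernel's.

/* Sketch: the validity test behind is_valid_ether_addr() as used for the
 * SAN MAC -- reject the all-zeros address and any group (multicast) address,
 * i.e. any address with bit 0 of the first octet set. Broadcast is the
 * all-ones multicast address, so the same test covers it.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mac_is_zero(const uint8_t mac[6])
{
    int i;

    for (i = 0; i < 6; i++)
        if (mac[i])
            return false;
    return true;
}

static bool mac_is_valid_unicast(const uint8_t mac[6])
{
    return !(mac[0] & 0x01) && !mac_is_zero(mac);
}

int main(void)
{
    const uint8_t san[6]   = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
    const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    const uint8_t zero[6]  = { 0 };

    printf("san:   %d\n", mac_is_valid_unicast(san));   /* 1: programmed into RAR */
    printf("bcast: %d\n", mac_is_valid_unicast(bcast)); /* 0: skipped */
    printf("zero:  %d\n", mac_is_valid_unicast(zero));  /* 0: skipped */
    return 0;
}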