Diffstat (limited to 'drivers/net/e1000e/lib.c')
-rw-r--r-- | drivers/net/e1000e/lib.c | 261
1 files changed, 134 insertions, 127 deletions
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 99ba2b8a2a05..a86c17548c1e 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. + Copyright(c) 1999 - 2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -26,11 +26,6 @@ *******************************************************************************/ -#include <linux/netdevice.h> -#include <linux/ethtool.h> -#include <linux/delay.h> -#include <linux/pci.h> - #include "e1000.h" enum e1000_mng_mode { @@ -87,7 +82,24 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) } /** - * e1000e_write_vfta - Write value to VLAN filter table + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table @@ -95,7 +107,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) { E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); e1e_flush(); @@ -115,12 +127,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) u32 i; /* Setup the receive address */ - hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); + e_dbg("Programming MAC Address into RAR[0]\n"); e1000e_rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ - hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); + e_dbg("Clearing RAR[1-%u]\n", rar_count-1); for (i = 1; i < rar_count; i++) { E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); e1e_flush(); @@ -276,7 +288,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, for (; mc_addr_count > 0; mc_addr_count--) { u32 hash_value, hash_reg, hash_bit, mta; hash_value = e1000_hash_mc_addr(hw, mc_addr_list); - hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); + e_dbg("Hash value = 0x%03X\n", hash_value); hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; mta = (1 << hash_bit); @@ -300,45 +312,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, **/ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) { - u32 temp; - - temp = er32(CRCERRS); - temp = er32(SYMERRS); - temp = er32(MPC); - temp = er32(SCC); - temp = er32(ECOL); - temp = er32(MCC); - temp = er32(LATECOL); - temp = er32(COLC); - temp = er32(DC); - temp = er32(SEC); - temp = er32(RLEC); - temp = er32(XONRXC); - temp = er32(XONTXC); - temp = er32(XOFFRXC); - temp = er32(XOFFTXC); - temp = er32(FCRUC); - temp = er32(GPRC); - temp = er32(BPRC); - temp = er32(MPRC); - temp = er32(GPTC); - temp = er32(GORCL); - temp = er32(GORCH); - temp = er32(GOTCL); - temp = er32(GOTCH); - temp = 
er32(RNBC); - temp = er32(RUC); - temp = er32(RFC); - temp = er32(ROC); - temp = er32(RJC); - temp = er32(TORL); - temp = er32(TORH); - temp = er32(TOTL); - temp = er32(TOTH); - temp = er32(TPR); - temp = er32(TPT); - temp = er32(MPTC); - temp = er32(BPTC); + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + er32(MPTC); + er32(BPTC); } /** @@ -376,7 +386,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) if (!link) return ret_val; /* No link detected */ - mac->get_link_status = 0; + mac->get_link_status = false; /* * Check if there was DownShift, must be checked @@ -408,7 +418,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { - hw_dbg(hw, "Error configuring flow control\n"); + e_dbg("Error configuring flow control\n"); } return ret_val; @@ -448,7 +458,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -461,7 +471,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { - hw_dbg(hw, "Error configuring flow control\n"); + e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { @@ -471,7 +481,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ - hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -513,7 +523,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) mac->autoneg_failed = 1; return 0; } - hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); @@ -526,7 +536,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { - hw_dbg(hw, "Error configuring flow control\n"); + e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { @@ -536,7 +546,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. 
*/ - hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -553,11 +563,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; - hw_dbg(hw, "SERDES: Link up - forced.\n"); + e_dbg("SERDES: Link up - forced.\n"); } } else { mac->serdes_has_link = false; - hw_dbg(hw, "SERDES: Link down - force failed.\n"); + e_dbg("SERDES: Link down - force failed.\n"); } } @@ -570,20 +580,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; - hw_dbg(hw, "SERDES: Link up - autoneg " + e_dbg("SERDES: Link up - autoneg " "completed sucessfully.\n"); } else { mac->serdes_has_link = false; - hw_dbg(hw, "SERDES: Link down - invalid" + e_dbg("SERDES: Link down - invalid" "codewords detected in autoneg.\n"); } } else { mac->serdes_has_link = false; - hw_dbg(hw, "SERDES: Link down - no sync.\n"); + e_dbg("SERDES: Link down - no sync.\n"); } } else { mac->serdes_has_link = false; - hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); + e_dbg("SERDES: Link down - autoneg failed\n"); } } @@ -614,7 +624,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } @@ -667,7 +677,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) */ hw->fc.current_mode = hw->fc.requested_mode; - hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary media_type subroutine to configure the link. */ @@ -681,7 +691,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) * control is disabled, because it does not hurt anything to * initialize these registers. */ - hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); + e_dbg("Initializing the Flow Control address, type and timer regs\n"); ew32(FCT, FLOW_CONTROL_TYPE); ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); @@ -751,7 +761,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); + e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; break; } @@ -789,7 +799,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) break; } if (i == FIBER_LINK_UP_LIMIT) { - hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); + e_dbg("Never got a valid link from auto-neg!!!\n"); mac->autoneg_failed = 1; /* * AutoNeg failed to achieve a link, so we'll call @@ -799,13 +809,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) */ ret_val = mac->ops.check_for_link(hw); if (ret_val) { - hw_dbg(hw, "Error while checking for link\n"); + e_dbg("Error while checking for link\n"); return ret_val; } mac->autoneg_failed = 0; } else { mac->autoneg_failed = 0; - hw_dbg(hw, "Valid Link Found\n"); + e_dbg("Valid Link Found\n"); } return 0; @@ -841,7 +851,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) * then the link-up status bit will be set and the flow control enable * bits (RFCE and TFCE) will be set according to their negotiated value. 
*/ - hw_dbg(hw, "Auto-negotiation enabled\n"); + e_dbg("Auto-negotiation enabled\n"); ew32(CTRL, ctrl); e1e_flush(); @@ -856,7 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) (er32(CTRL) & E1000_CTRL_SWDPIN1)) { ret_val = e1000_poll_fiber_serdes_link_generic(hw); } else { - hw_dbg(hw, "No signal detected\n"); + e_dbg("No signal detected\n"); } return 0; @@ -952,7 +962,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) * 3: Both Rx and Tx flow control (symmetric) is enabled. * other: No other values should be possible at this point. */ - hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); + e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); switch (hw->fc.current_mode) { case e1000_fc_none: @@ -970,7 +980,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); break; default: - hw_dbg(hw, "Flow control param set incorrectly\n"); + e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } @@ -1011,7 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) } if (ret_val) { - hw_dbg(hw, "Error forcing flow control settings\n"); + e_dbg("Error forcing flow control settings\n"); return ret_val; } @@ -1035,7 +1045,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) return ret_val; if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - hw_dbg(hw, "Copper PHY and Auto Neg " + e_dbg("Copper PHY and Auto Neg " "has not completed.\n"); return ret_val; } @@ -1076,7 +1086,6 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * 1 | 1 | 0 | 0 | e1000_fc_none * 1 | 1 | 0 | 1 | e1000_fc_rx_pause * - * * Are both PAUSE bits set to 1? If so, this implies * Symmetric Flow Control is enabled at both ends. The * ASM_DIR bits are irrelevant per the spec. @@ -1100,10 +1109,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - hw_dbg(hw, "Flow Control = FULL.\r\n"); + e_dbg("Flow Control = FULL.\r\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg(hw, "Flow Control = " + e_dbg("Flow Control = " "RX PAUSE frames only.\r\n"); } } @@ -1114,14 +1123,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - * */ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); + e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); } /* * For transmitting PAUSE frames ONLY. @@ -1130,21 +1138,20 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - * */ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); + e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); } else { /* * Per the IEEE spec, at this point flow control * should be disabled. 
*/ hw->fc.current_mode = e1000_fc_none; - hw_dbg(hw, "Flow Control = NONE.\r\n"); + e_dbg("Flow Control = NONE.\r\n"); } /* @@ -1154,7 +1161,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) */ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { - hw_dbg(hw, "Error getting link speed and duplex\n"); + e_dbg("Error getting link speed and duplex\n"); return ret_val; } @@ -1167,7 +1174,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) */ ret_val = e1000e_force_mac_fc(hw); if (ret_val) { - hw_dbg(hw, "Error forcing flow control settings\n"); + e_dbg("Error forcing flow control settings\n"); return ret_val; } } @@ -1191,21 +1198,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup status = er32(STATUS); if (status & E1000_STATUS_SPEED_1000) { *speed = SPEED_1000; - hw_dbg(hw, "1000 Mbs, "); + e_dbg("1000 Mbs, "); } else if (status & E1000_STATUS_SPEED_100) { *speed = SPEED_100; - hw_dbg(hw, "100 Mbs, "); + e_dbg("100 Mbs, "); } else { *speed = SPEED_10; - hw_dbg(hw, "10 Mbs, "); + e_dbg("10 Mbs, "); } if (status & E1000_STATUS_FD) { *duplex = FULL_DUPLEX; - hw_dbg(hw, "Full Duplex\n"); + e_dbg("Full Duplex\n"); } else { *duplex = HALF_DUPLEX; - hw_dbg(hw, "Half Duplex\n"); + e_dbg("Half Duplex\n"); } return 0; @@ -1251,7 +1258,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) } if (i == timeout) { - hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); + e_dbg("Driver can't access device - SMBI bit is set.\n"); return -E1000_ERR_NVM; } @@ -1270,7 +1277,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (i == timeout) { /* Release semaphores */ e1000e_put_hw_semaphore(hw); - hw_dbg(hw, "Driver can't access the NVM\n"); + e_dbg("Driver can't access the NVM\n"); return -E1000_ERR_NVM; } @@ -1310,7 +1317,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) } if (i == AUTO_READ_DONE_TIMEOUT) { - hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); + e_dbg("Auto read by HW from NVM has not completed.\n"); return -E1000_ERR_RESET; } @@ -1331,7 +1338,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } @@ -1585,7 +1592,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) } if (!timeout) { - hw_dbg(hw, "Master requests are pending.\n"); + e_dbg("Master requests are pending.\n"); return -E1000_ERR_MASTER_REQUESTS_PENDING; } @@ -1608,7 +1615,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) mac->ifs_step_size = IFS_STEP; mac->ifs_ratio = IFS_RATIO; - mac->in_ifs_mode = 0; + mac->in_ifs_mode = false; ew32(AIT, 0); } @@ -1625,7 +1632,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { if (mac->tx_packet_delta > MIN_NUM_XMITS) { - mac->in_ifs_mode = 1; + mac->in_ifs_mode = true; if (mac->current_ifs_val < mac->ifs_max_val) { if (!mac->current_ifs_val) mac->current_ifs_val = mac->ifs_min_val; @@ -1639,7 +1646,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) if (mac->in_ifs_mode && (mac->tx_packet_delta <= MIN_NUM_XMITS)) { mac->current_ifs_val = 0; - mac->in_ifs_mode = 0; + mac->in_ifs_mode = false; ew32(AIT, 0); } } @@ -1809,7 +1816,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw) if (!timeout) { eecd &= ~E1000_EECD_REQ; ew32(EECD, eecd); - hw_dbg(hw, "Could not acquire NVM grant\n"); + e_dbg("Could not acquire NVM grant\n"); return -E1000_ERR_NVM; } @@ 
-1914,7 +1921,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) } if (!timeout) { - hw_dbg(hw, "SPI NVM Status error\n"); + e_dbg("SPI NVM Status error\n"); return -E1000_ERR_NVM; } } @@ -1943,7 +1950,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } @@ -1986,11 +1993,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } - ret_val = nvm->ops.acquire_nvm(hw); + ret_val = nvm->ops.acquire(hw); if (ret_val) return ret_val; @@ -2001,7 +2008,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ret_val = e1000_ready_nvm_eeprom(hw); if (ret_val) { - nvm->ops.release_nvm(hw); + nvm->ops.release(hw); return ret_val; } @@ -2040,7 +2047,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) } msleep(10); - nvm->ops.release_nvm(hw); + nvm->ops.release(hw); return 0; } @@ -2066,7 +2073,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &mac_addr_offset); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } if (mac_addr_offset == 0xFFFF) @@ -2081,7 +2088,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } if (nvm_data & 0x0001) @@ -2096,7 +2103,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) offset = mac_addr_offset + (i >> 1); ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); @@ -2129,14 +2136,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { - hw_dbg(hw, "NVM Checksum Invalid\n"); + e_dbg("NVM Checksum Invalid\n"); return -E1000_ERR_NVM; } @@ -2160,7 +2167,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error while updating checksum.\n"); + e_dbg("NVM Read Error while updating checksum.\n"); return ret_val; } checksum += nvm_data; @@ -2168,7 +2175,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) checksum = (u16) NVM_SUM - checksum; ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) - hw_dbg(hw, "NVM Write Error while updating checksum.\n"); + e_dbg("NVM Write Error while updating checksum.\n"); return ret_val; } @@ -2231,7 +2238,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) /* Check that the host interface is enabled. 
*/ hicr = er32(HICR); if ((hicr & E1000_HICR_EN) == 0) { - hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); + e_dbg("E1000_HOST_EN bit disabled.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } /* check the previous command is completed */ @@ -2243,7 +2250,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) } if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { - hw_dbg(hw, "Previous command timeout failed .\n"); + e_dbg("Previous command timeout failed .\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } @@ -2282,7 +2289,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) /* No manageability, no filtering */ if (!e1000e_check_mng_mode(hw)) { - hw->mac.tx_pkt_filtering = 0; + hw->mac.tx_pkt_filtering = false; return 0; } @@ -2292,7 +2299,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) */ ret_val = e1000_mng_enable_host_if(hw); if (ret_val != 0) { - hw->mac.tx_pkt_filtering = 0; + hw->mac.tx_pkt_filtering = false; return ret_val; } @@ -2311,17 +2318,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) * take the safe route of assuming Tx filtering is enabled. */ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { - hw->mac.tx_pkt_filtering = 1; + hw->mac.tx_pkt_filtering = true; return 1; } /* Cookie area is valid, make the final check for filtering. */ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { - hw->mac.tx_pkt_filtering = 0; + hw->mac.tx_pkt_filtering = false; return 0; } - hw->mac.tx_pkt_filtering = 1; + hw->mac.tx_pkt_filtering = true; return 1; } @@ -2353,7 +2360,7 @@ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, } /** - * e1000_mng_host_if_write - Writes to the manageability host interface + * e1000_mng_host_if_write - Write to the manageability host interface * @hw: pointer to the HW structure * @buffer: pointer to the host interface buffer * @length: size of the buffer @@ -2478,7 +2485,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) { u32 manc; u32 fwsm, factps; - bool ret_val = 0; + bool ret_val = false; manc = er32(MANC); @@ -2493,13 +2500,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) if (!(factps & E1000_FACTPS_MNGCG) && ((fwsm & E1000_FWSM_MODE_MASK) == (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = 1; + ret_val = true; return ret_val; } } else { if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) { - ret_val = 1; + ret_val = true; return ret_val; } } @@ -2514,14 +2521,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } *pba_num = (u32)(nvm_data << 16); ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); + e_dbg("NVM Read Error\n"); return ret_val; } *pba_num |= nvm_data; |