Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/Makefile | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 199
-rw-r--r--  drivers/net/bonding/bond_alb.c | 150
-rw-r--r--  drivers/net/bonding/bond_alb.h | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 715
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 131
-rw-r--r--  drivers/net/bonding/bond_options.c | 142
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 21
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 264
-rw-r--r--  drivers/net/bonding/bonding.h | 113
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/bfin_can.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r--  drivers/net/can/cc770/cc770_platform.c | 4
-rw-r--r--  drivers/net/can/dev.c | 67
-rw-r--r--  drivers/net/can/flexcan.c | 2
-rw-r--r--  drivers/net/can/janz-ican3.c | 2
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/mscan/mscan.h | 6
-rw-r--r--  drivers/net/can/pch_can.c | 1
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c | 1
-rw-r--r--  drivers/net/can/sja1000/kvaser_pci.c | 1
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 2
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c | 1
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 2
-rw-r--r--  drivers/net/can/softing/softing.h | 24
-rw-r--r--  drivers/net/can/softing/softing_main.c | 2
-rw-r--r--  drivers/net/can/ti_hecc.c | 12
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 1
-rw-r--r--  drivers/net/ethernet/8390/8390.h | 40
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 3
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.h | 2
-rw-r--r--  drivers/net/ethernet/amd/7990.h | 12
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 4
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 16
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 3
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 1
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 7
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 6
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e.h | 12
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 46
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 117
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 25
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 80
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 149
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 154
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 43
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h | 46
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/regs.h | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 1
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 1
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 1
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 177
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 27
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 332
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 315
-rw-r--r--  drivers/net/ethernet/fealnx.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 87
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 26
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r--  drivers/net/ethernet/fujitsu/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 8
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/debug.h | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.h | 18
-rw-r--r--  drivers/net/ethernet/ibm/emac/tah.h | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.h | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 32
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 338
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 69
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 484
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 433
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 38
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 74
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 145
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 61
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 22
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 258
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 109
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 589
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 234
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 178
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 255
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 4
-rw-r--r--  drivers/net/ethernet/jme.c | 6
-rw-r--r--  drivers/net/ethernet/jme.h | 1
-rw-r--r--  drivers/net/ethernet/korina.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 110
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 152
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 589
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 3
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 9
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 56
-rw-r--r--  drivers/net/ethernet/packetengines/hamachi.c | 1
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 30
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 181
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 145
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 78
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 47
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 184
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 109
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 260
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 67
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 434
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 60
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 127
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 1
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 319
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 105
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 120
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h | 26
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 73
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 256
-rw-r--r--  drivers/net/ethernet/sfc/phy.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 90
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 15
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 426
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 1
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 126
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 331
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 56
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 43
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 237
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 4
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 169
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 4
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 12
-rw-r--r--  drivers/net/ethernet/sun/sunqe.c | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 1
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 161
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 159
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.h | 29
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.h | 6
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 1
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.h | 4
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 1
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 41
-rw-r--r--  drivers/net/fddi/skfp/fplustm.c | 2
-rw-r--r--  drivers/net/fddi/skfp/h/smc.h | 28
-rw-r--r--  drivers/net/fddi/skfp/skfddi.c | 6
-rw-r--r--  drivers/net/hamradio/baycom_ser_fdx.c | 2
-rw-r--r--  drivers/net/hamradio/baycom_ser_hdx.c | 2
-rw-r--r--  drivers/net/hamradio/scc.c | 2
-rw-r--r--  drivers/net/hamradio/yam.c | 2
-rw-r--r--  drivers/net/irda/bfin_sir.c | 4
-rw-r--r--  drivers/net/irda/donauboe.c | 4
-rw-r--r--  drivers/net/irda/sh_irda.c | 2
-rw-r--r--  drivers/net/irda/sh_sir.c | 2
-rw-r--r--  drivers/net/irda/sir-dev.h | 29
-rw-r--r--  drivers/net/macvlan.c | 47
-rw-r--r--  drivers/net/netconsole.c | 57
-rw-r--r--  drivers/net/phy/Kconfig | 7
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/at803x.c | 57
-rw-r--r--  drivers/net/phy/marvell.c | 4
-rw-r--r--  drivers/net/phy/mdio-moxart.c | 201
-rw-r--r--  drivers/net/phy/micrel.c | 24
-rw-r--r--  drivers/net/phy/realtek.c | 15
-rw-r--r--  drivers/net/plip/plip.c | 2
-rw-r--r--  drivers/net/usb/Kconfig | 15
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/catc.c | 8
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 104
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 507
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 230
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 69
-rw-r--r--  drivers/net/veth.c | 9
-rw-r--r--  drivers/net/virtio_net.c | 219
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vxlan.c | 37
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 2
-rw-r--r--  drivers/net/wan/sealevel.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.h | 2
-rw-r--r--  drivers/net/wan/z85230.h | 27
-rw-r--r--  drivers/net/wimax/i2400m/i2400m-usb.h | 27
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h | 117
-rw-r--r--  drivers/net/wireless/adm8211.c | 1
-rw-r--r--  drivers/net/wireless/airo.c | 1
-rw-r--r--  drivers/net/wireless/ath/Kconfig | 18
-rw-r--r--  drivers/net/wireless/ath/Makefile | 5
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 42
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 397
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 126
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 355
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 80
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 157
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 27
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 241
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 314
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 287
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 79
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 732
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 465
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 76
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 24
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.h | 32
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 67
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 1277
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 1037
-rw-r--r--  drivers/net/wireless/ath/ath5k/ahb.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/common.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 36
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 48
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_calib.c | 92
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 240
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_rtt.c | 58
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9485_initvals.h | 218
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 24
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 73
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.c | 91
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 564
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_debug.c | 32
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_debug.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_4k.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 456
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 32
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw-ops.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 128
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 112
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 140
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 27
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 327
-rw-r--r--  drivers/net/wireless/ath/ath9k/mci.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 195
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 32
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 197
-rw-r--r--  drivers/net/wireless/ath/ath9k/wmi.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 70
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c) | 23
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h) | 28
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.c (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c) | 10
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.h (renamed from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h) | 2
-rw-r--r--  drivers/net/wireless/ath/regd.c | 140
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/Kconfig | 16
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/Makefile | 7
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/debug.c | 181
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/debug.h | 49
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 805
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.h | 284
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 4657
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 1036
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/pmc.c | 62
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/pmc.h | 33
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 2126
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 127
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/txrx.c | 284
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/txrx.h | 160
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 238
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 1
-rw-r--r--  drivers/net/wireless/atmel.c | 94
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 3
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 2
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 186
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 30
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 32
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 29
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 38
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h | 12
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 343
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c | 28
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h | 31
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 96
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.h | 18
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/ampdu.h | 22
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/antsel.h | 14
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/channel.h | 20
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h | 38
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.h | 110
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h | 219
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h | 371
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h | 91
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/pmu.h | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/pub.h | 145
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/rate.h | 48
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/stf.h | 31
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h | 16
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 1
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcmu_d11.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcmu_utils.h | 44
-rw-r--r--  drivers/net/wireless/cw1200/cw1200_spi.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_info.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw.h | 87
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/3945.h | 82
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/4965.h | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 66
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/bt-coex.c | 638
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/constants.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 515
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 207
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h | 149
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 69
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 31
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 81
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 256
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 89
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/nvm.c | 101
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 62
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 70
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/quota.c | 42
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 793
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 163
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 462
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 206
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/testmode.h | 95
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 127
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 43
-rw-r--r--  drivers/net/wireless/libertas/firmware.c | 5
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 8
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 8
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 6
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 17
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 27
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.h | 24
-rw-r--r--  drivers/net/wireless/mwl8k.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco.h | 31
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_nortel.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_pci.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_plx.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_tmd.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 1
-rw-r--r--  drivers/net/wireless/p54/p54spi.c | 2
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 10
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 2
-rw-r--r--  drivers/net/wireless/prism54/oid_mgt.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 28
-rw-r--r--  drivers/net/wireless/rt2x00/Makefile | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 44
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 289
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800mmio.c | 873
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800mmio.h | 165
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 951
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.h | 97
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800soc.c | 263
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 29
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 103
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00crypto.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00link.c | 74
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 39
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 20
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 18
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 29
-rw-r--r--  drivers/net/wireless/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/cam.h | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.h | 29
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/phy.c | 28
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/phy.h | 52
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 25
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/def.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/phy.h | 52
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/reg.h | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/rf.h | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 187
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/rf.h | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/hw.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/hw.h | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 28
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/phy.h | 49
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/rf.h | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/reg.h | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/phy.c | 29
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/phy.h | 62
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/rf.h | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/trx.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 2
-rw-r--r--  drivers/net/wireless/ti/wl1251/spi.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl1251/wl1251.h | 4
-rw-r--r--  drivers/net/wireless/ti/wl12xx/main.c | 18
-rw-r--r--  drivers/net/wireless/ti/wl18xx/main.c | 127
-rw-r--r--  drivers/net/wireless/ti/wl18xx/reg.h | 33
-rw-r--r--  drivers/net/wireless/ti/wlcore/acx.c | 10
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 70
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.h | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/conf.h | 5
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.c | 18
-rw-r--r--  drivers/net/wireless/ti/wlcore/event.c | 1
-rw-r--r--  drivers/net/wireless/ti/wlcore/hw_ops.h | 9
-rw-r--r--  drivers/net/wireless/ti/wlcore/init.c | 6
-rw-r--r--  drivers/net/wireless/ti/wlcore/io.h | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 232
-rw-r--r--  drivers/net/wireless/ti/wlcore/ps.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/scan.c | 51
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/testmode.c | 16
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 27
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.h | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore.h | 11
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore_i.h | 13
-rw-r--r--  drivers/net/xen-netback/common.h | 12
-rw-r--r--  drivers/net/xen-netback/interface.c | 16
-rw-r--r--  drivers/net/xen-netback/netback.c | 294
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 52
-rw-r--r--  drivers/net/xen-netfront.c | 4
632 files changed, 34179 insertions, 13550 deletions
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6b8b2f..5a5d720da929 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0d8f427ade93..187b1b7772ef 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -136,41 +136,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
}
/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
- struct slave *first_slave = bond_first_slave(bond);
-
- return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
- struct bonding *bond = __get_bond_by_port(port);
- struct slave *slave = port->slave, *slave_next;
-
- // If there's no bond for this port, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).port);
-}
-
-/**
* __get_first_agg - get the first aggregator in the bond
* @bond: the bond we're looking at
*
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
- struct slave *slave = aggregator->slave, *slave_next;
- struct bonding *bond = bond_get_bond_by_slave(slave);
-
- // If there's no bond for this aggregator, or this is the last slave
- if (bond == NULL)
- return NULL;
- slave_next = bond_next_slave(bond, slave);
- if (!slave_next || bond_is_first_slave(bond, slave_next))
- return NULL;
-
- return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
/*
* __agg_has_partner
*
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
- struct aggregator *retval = NULL;
+ struct bonding *bond = aggregator->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
- for (; aggregator; aggregator = __get_next_agg(aggregator)) {
- if (aggregator->is_active) {
- retval = aggregator;
- break;
- }
- }
+ bond_for_each_slave(bond, slave, iter)
+ if (SLAVE_AD_INFO(slave).aggregator.is_active)
+ return &(SLAVE_AD_INFO(slave).aggregator);
- return retval;
+ return NULL;
}
/**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
{
struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
struct port *last_port = NULL, *curr_port;
+ struct list_head *iter;
+ struct bonding *bond;
+ struct slave *slave;
int found = 0;
// if the port is already Selected, do nothing
if (port->sm_vars & AD_PORT_SELECTED)
return;
+ bond = __get_bond_by_port(port);
+
// if the port is connected to other aggregator, detach it
if (port->aggregator) {
// detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
}
}
// search on all aggregators for a suitable aggregator for this port
- for (aggregator = __get_first_agg(port); aggregator;
- aggregator = __get_next_agg(aggregator)) {
+ bond_for_each_slave(bond, slave, iter) {
+ aggregator = &(SLAVE_AD_INFO(slave).aggregator);
// keep a free aggregator for later use(if needed)
if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
static void ad_agg_selection_logic(struct aggregator *agg)
{
struct aggregator *best, *active, *origin;
+ struct bonding *bond = agg->slave->bond;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
- do {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
+
agg->is_active = 0;
if (agg->num_of_ports && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
-
- } while ((agg = __get_next_agg(agg)));
+ }
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->lag_ports, best->slave,
best->slave ? best->slave->dev->name : "NULL");
- for (agg = __get_first_agg(best->lag_ports); agg;
- agg = __get_next_agg(agg)) {
+ bond_for_each_slave(bond, slave, iter) {
+ agg = &(SLAVE_AD_INFO(slave).aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
}
}
- if (origin->slave) {
- struct bonding *bond;
-
- bond = bond_get_bond_by_slave(origin->slave);
- if (bond)
- bond_3ad_set_carrier(bond);
- }
+ bond_3ad_set_carrier(bond);
}
/**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct port *port, *prev_port, *temp_port;
struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
int select_new_active_agg = 0;
+ struct bonding *bond = slave->bond;
+ struct slave *slave_iter;
+ struct list_head *iter;
// find the aggregator related to this slave
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
// reason to search for new aggregator, and that we will find one
if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
// find new aggregator for the related port(s)
- new_aggregator = __get_first_agg(port);
- for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
// if the new aggregator is empty, or it is connected to our port only
if (!new_aggregator->lag_ports
|| ((new_aggregator->lag_ports == port)
&& !new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
+ if (!slave_iter)
+ new_aggregator = NULL;
// if new aggregator found, copy the aggregator's parameters
// and connect the related lag_ports to the new aggregator
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
pr_info("%s: Removing an active aggregator\n",
slave->bond->dev->name);
// select new active aggregator
- ad_agg_selection_logic(__get_first_agg(port));
+ temp_aggregator = __get_first_agg(port);
+ if (temp_aggregator)
+ ad_agg_selection_logic(temp_aggregator);
}
}
}
pr_debug("Unbinding port %d\n", port->actor_port_number);
// find the aggregator that this port is connected to
- temp_aggregator = __get_first_agg(port);
- for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+ bond_for_each_slave(bond, slave_iter, iter) {
+ temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
prev_port = NULL;
// search the port in the aggregator's related ports
for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
ad_work.work);
- struct port *port;
struct aggregator *aggregator;
+ struct list_head *iter;
+ struct slave *slave;
+ struct port *port;
read_lock(&bond->lock);
//check if there are any slaves
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
// check if agg_select_timer timer after initialize is timed out
if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ slave = bond_first_slave(bond);
+ port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
// select the active aggregator for the bond
- if ((port = __get_first_port(bond))) {
+ if (port) {
if (!port->slave) {
pr_warning("%s: Warning: bond's first port is uninitialized\n",
bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
// for each port run the state machines
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
pr_warning("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info)
{
struct aggregator *aggregator = NULL;
+ struct list_head *iter;
+ struct slave *slave;
struct port *port;
- for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave).port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
int ret;
- read_lock(&bond->lock);
+ rcu_read_lock();
ret = __bond_3ad_get_active_agg_info(bond, ad_info);
- read_unlock(&bond->lock);
+ rcu_read_unlock();
return ret;
}
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
{
- struct slave *slave, *start_at;
struct bonding *bond = netdev_priv(dev);
- int slave_agg_no;
- int slaves_in_agg;
- int agg_id;
- int i;
+ struct slave *slave, *first_ok_slave;
+ struct aggregator *agg;
struct ad_info ad_info;
+ struct list_head *iter;
+ int slaves_in_agg;
+ int slave_agg_no;
int res = 1;
+ int agg_id;
- read_lock(&bond->lock);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
agg_id = ad_info.aggregator_id;
if (slaves_in_agg == 0) {
- /*the aggregator is empty*/
pr_debug("%s: Error: active aggregator is empty\n", dev->name);
goto out;
}
- slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ first_ok_slave = NULL;
- bond_for_each_slave(bond, slave) {
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
- if (agg && (agg->aggregator_identifier == agg_id)) {
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && SLAVE_IS_OK(slave))
+ first_ok_slave = slave;
slave_agg_no--;
- if (slave_agg_no < 0)
- break;
+ continue;
+ }
+
+ if (SLAVE_IS_OK(slave)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ goto out;
}
}
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- start_at = slave;
-
- bond_for_each_slave_from(bond, slave, i, start_at) {
- int slave_agg_id = 0;
- struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
- if (agg)
- slave_agg_id = agg->aggregator_identifier;
-
- if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
+ /* we couldn't find any suitable slave after the agg_no, so use the
+ * first suitable found, if found. */
+ if (first_ok_slave)
+ res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
out:
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
struct port *port = NULL;
+ struct list_head *iter;
struct slave *slave;
int lacp_fast;
lacp_fast = bond->params.lacp_fast;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
__get_state_machine_lock(port);
if (lacp_fast)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef574372..02872405d35d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
+ struct list_head *iter;
long long max_gap;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
@@ -382,30 +383,64 @@ out:
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct slave *rx_slave, *slave, *start_at;
- int i = 0;
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
- if (bond_info->next_rx_slave)
- start_at = bond_info->next_rx_slave;
- else
- start_at = bond_first_slave(bond);
+ bond_for_each_slave(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
+ rx_slave = slave;
+ }
+ if (slave == bond_info->rx_slave)
+ found = true;
+ }
+ /* we didn't find anything after the current or we have something
+ * better before and up to the current slave
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- rx_slave = NULL;
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (SLAVE_IS_OK(slave)) {
- if (!rx_slave) {
- rx_slave = slave;
- } else if (slave->speed > rx_slave->speed) {
+ return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct slave *before = NULL, *rx_slave = NULL, *slave;
+ struct list_head *iter;
+ bool found = false;
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (!SLAVE_IS_OK(slave))
+ continue;
+ if (!found) {
+ if (!before || before->speed < slave->speed)
+ before = slave;
+ } else {
+ if (!rx_slave || rx_slave->speed < slave->speed)
rx_slave = slave;
- }
}
+ if (slave == bond_info->rx_slave)
+ found = true;
}
+ /* we didn't find anything after the current or we have something
+ * better before and up to the current slave
+ */
+ if (!rx_slave || (before && rx_slave->speed < before->speed))
+ rx_slave = before;
- if (rx_slave) {
- slave = bond_next_slave(bond, rx_slave);
- bond_info->next_rx_slave = slave;
- }
+ if (rx_slave)
+ bond_info->rx_slave = rx_slave;
return rx_slave;
}
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct arp_pkt *arp = arp_pkt(skb);
- struct slave *assigned_slave;
+ struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
_lock_rx_hashtbl(bond);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* that the new client can be assigned to this entry.
*/
if (bond->curr_active_slave &&
- client_info->slave != bond->curr_active_slave) {
- client_info->slave = bond->curr_active_slave;
+ client_info->slave != curr_active_slave) {
+ client_info->slave = curr_active_slave;
rlb_update_client(client_info);
}
}
}
/* assign a new slave */
- assigned_slave = rlb_next_rx_slave(bond);
+ assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave) {
if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
*/
- if (!bond_slave_has_mac(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
/* loop through vlans and send one packet for each */
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper->priv_flags & IFF_802_1Q_VLAN)
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*/
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
- struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave;
+ struct slave *tmp_slave1, *free_mac_slave = NULL;
+ struct list_head *iter;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* this is the first slave */
return 0;
}
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
- bond_for_each_slave(bond, tmp_slave1) {
+ bond_for_each_slave(bond, tmp_slave1, iter) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
- char tmp_addr[ETH_ALEN];
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
struct sockaddr sa;
+ char tmp_addr[ETH_ALEN];
int res;
if (bond->alb_info.rlb_enabled)
return 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1274,10 +1313,12 @@ unwind:
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
- dev_set_mac_address(slave->dev, &sa);
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+ memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ dev_set_mac_address(rollback_slave->dev, &sa);
+ memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
}
return res;
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
- /* make sure that the curr_active_slave do not change during tx
- */
- read_lock(&bond->lock);
- read_lock(&bond->curr_slave_lock);
-
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (!tx_slave) {
/* unbalanced or unassigned, send through primary */
- tx_slave = bond->curr_active_slave;
+ tx_slave = rcu_dereference(bond->curr_active_slave);
bond_info->unbalanced_load += skb->len;
}
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != bond->curr_active_slave) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
memcpy(eth_data->h_source,
tx_slave->dev->dev_addr,
ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
- read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct list_head *iter;
struct slave *slave;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
alb_send_learning_packets(slave, slave->dev->dev_addr);
read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
alb_change_hw_addr_on_detach(bond, slave);
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
- bond->alb_info.next_rx_slave = NULL;
+ bond->alb_info.rx_slave = NULL;
rlb_clear_slave(bond, slave);
}
}
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
swap_slave = bond->curr_active_slave;
rcu_assign_pointer(bond->curr_active_slave, new_slave);
- if (!new_slave || list_empty(&bond->slave_list))
+ if (!new_slave || !bond_has_slaves(bond))
return;
/* set the new curr_active_slave to the bonds mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
ASSERT_RTNL();
+ /* in TLB mode, the slave might flip down/up with the old dev_addr,
+ * and thus filter bond->dev_addr's packets, so force bond's mac
+ */
+ if (bond->params.mode == BOND_MODE_TLB) {
+ struct sockaddr sa;
+ u8 tmp_addr[ETH_ALEN];
+
+ memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+ memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+ sa.sa_family = bond->dev->type;
+ /* we don't care if it can't change its mac, best effort */
+ dev_set_mac_address(new_slave->dev, &sa);
+
+ memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ }
+
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index c5eff5dafdfe..4226044efd08 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -154,9 +154,7 @@ struct alb_bond_info {
u8 rx_ntt; /* flag - need to transmit
* to all rx clients
*/
- struct slave *next_rx_slave;/* next slave to be assigned
- * to a new rx client for
- */
+ struct slave *rx_slave;/* last slave to xmit from */
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e883bfe2e727..4dd5ee2a34cc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -78,6 +78,8 @@
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
+#include <net/flow_keys.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -110,6 +112,7 @@ static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
+static int packets_per_slave = 1;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -159,7 +162,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
- "2 for layer 2+3");
+ "2 for layer 2+3, 3 for encap layer 2+3, "
+ "4 for encap layer 3+4");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
@@ -181,6 +185,10 @@ MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
"link failure");
+module_param(packets_per_slave, int, 0);
+MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
+ "mode; 0 for a random slave, 1 packet per "
+ "slave (default), >1 packets per slave.");
/*----------------------------- Global variables ----------------------------*/
@@ -217,6 +225,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
+{ "encap2+3", BOND_XMIT_POLICY_ENCAP23},
+{ "encap3+4", BOND_XMIT_POLICY_ENCAP34},
{ NULL, -1},
};
@@ -332,10 +342,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
@@ -344,9 +355,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
return 0;
unwind:
- /* unwind from the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave)
- vlan_vid_del(slave->dev, proto, vid);
+ /* unwind to the slave that failed */
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ if (rollback_slave == slave)
+ break;
+
+ vlan_vid_del(rollback_slave->dev, proto, vid);
+ }
return res;
}
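The unwind above replaces the old reverse iterator with a forward walk that stops
at the slave that failed; the same rollback shape reappears later for the MTU and
MAC address setters. A self-contained sketch of the idiom, with apply()/undo()
standing in for vlan_vid_add()/vlan_vid_del() (nothing here is kernel API):

#include <stdio.h>

static int apply(int id) { return id == 3 ? -1 : 0; }  /* pretend id 3 fails */
static void undo(int id) { printf("undo %d\n", id); }

int main(void)
{
        int ids[] = { 1, 2, 3, 4 };
        int i, failed = -1;

        for (i = 0; i < 4; i++) {
                if (apply(ids[i]) < 0) {
                        failed = i;
                        break;
                }
        }
        if (failed < 0)
                return 0;

        /* unwind to the element that failed: walk from the head again and undo
         * everything before it, the array equivalent of the new
         * "if (rollback_slave == slave) break;" loop
         */
        for (i = 0; i < failed; i++)
                undo(ids[i]);
        return 1;
}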
@@ -360,9 +375,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
vlan_vid_del(slave->dev, proto, vid);
if (bond_is_lb(bond))
@@ -382,15 +398,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
*/
static int bond_set_carrier(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto down;
if (bond->params.mode == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
@@ -522,7 +539,9 @@ static int bond_check_dev_link(struct bonding *bond,
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -532,7 +551,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
@@ -546,7 +565,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
*/
static int bond_set_allmulti(struct bonding *bond, int inc)
{
+ struct list_head *iter;
int err = 0;
+
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
@@ -556,7 +577,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
} else {
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
@@ -774,43 +795,24 @@ static bool bond_should_change_active(struct bonding *bond)
/**
* bond_find_best_slave - select the best available slave to be the active one
* @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
- struct slave *new_active, *old_active;
- struct slave *bestslave = NULL;
+ struct slave *slave, *bestslave = NULL;
+ struct list_head *iter;
int mintime = bond->params.updelay;
- int i;
- new_active = bond->curr_active_slave;
-
- if (!new_active) { /* there were no active slaves left */
- new_active = bond_first_slave(bond);
- if (!new_active)
- return NULL; /* still no slave, return NULL */
- }
-
- if ((bond->primary_slave) &&
- bond->primary_slave->link == BOND_LINK_UP &&
- bond_should_change_active(bond)) {
- new_active = bond->primary_slave;
- }
-
- /* remember where to stop iterating over the slaves */
- old_active = new_active;
-
- bond_for_each_slave_from(bond, new_active, i, old_active) {
- if (new_active->link == BOND_LINK_UP) {
- return new_active;
- } else if (new_active->link == BOND_LINK_BACK &&
- IS_UP(new_active->dev)) {
- /* link up, but waiting for stabilization */
- if (new_active->delay < mintime) {
- mintime = new_active->delay;
- bestslave = new_active;
- }
+ if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+ bond_should_change_active(bond))
+ return bond->primary_slave;
+
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP)
+ return slave;
+ if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+ slave->delay < mintime) {
+ mintime = slave->delay;
+ bestslave = slave;
}
}
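The rewritten bond_find_best_slave() above reduces to a three-level preference:
an up primary slave (when bond_should_change_active() allows the switch),
otherwise the first slave whose link is up, otherwise the BOND_LINK_BACK slave
with the smallest remaining updelay. A stand-alone model of that ordering
(plain C; the struct and states below are illustrative only):

#include <stdio.h>

enum link { LINK_UP, LINK_BACK, LINK_DOWN };

struct fake_slave { enum link link; int delay; int is_primary; };

static int pick(const struct fake_slave *s, int n, int updelay)
{
        int best = -1, mintime = updelay, i;

        for (i = 0; i < n; i++)                  /* primary wins outright */
                if (s[i].is_primary && s[i].link == LINK_UP)
                        return i;

        for (i = 0; i < n; i++) {
                if (s[i].link == LINK_UP)        /* first slave with link up */
                        return i;
                if (s[i].link == LINK_BACK && s[i].delay < mintime) {
                        mintime = s[i].delay;    /* best still-settling slave */
                        best = i;
                }
        }
        return best;
}

int main(void)
{
        struct fake_slave s[] = {
                { LINK_DOWN, 0, 0 }, { LINK_BACK, 2, 0 }, { LINK_BACK, 1, 0 },
        };

        printf("best: %d\n", pick(s, 3, 5));     /* 2, the shortest delay */
        return 0;
}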
@@ -971,35 +973,6 @@ void bond_select_active_slave(struct bonding *bond)
}
}
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
- list_add_tail_rcu(&new_slave->list, &bond->slave_list);
- bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
- list_del_rcu(&slave->list);
- bond->slave_cnt--;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
@@ -1046,9 +1019,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
@@ -1056,10 +1030,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
struct slave *slave;
int err = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
err = slave_enable_netpoll(slave);
if (err) {
bond_netpoll_cleanup(dev);
@@ -1087,10 +1062,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
netdev_features_t mask;
struct slave *slave;
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
/* Disable adding VLANs to empty bond. But why? --mq */
features |= NETIF_F_VLAN_CHALLENGED;
return features;
@@ -1100,7 +1076,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
@@ -1118,16 +1094,17 @@ static void bond_compute_features(struct bonding *bond)
{
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+ struct net_device *bond_dev = bond->dev;
+ struct list_head *iter;
+ struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int gso_max_size = GSO_MAX_SIZE;
- struct net_device *bond_dev = bond->dev;
u16 gso_max_segs = GSO_MAX_SEGS;
- struct slave *slave;
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto done;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
@@ -1233,15 +1210,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
static int bond_master_upper_dev_link(struct net_device *bond_dev,
- struct net_device *slave_dev)
+ struct net_device *slave_dev,
+ struct slave *slave)
{
int err;
- err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+ err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
if (err)
return err;
slave_dev->flags |= IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
return 0;
}
@@ -1250,7 +1228,7 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
{
netdev_upper_dev_unlink(slave_dev, bond_dev);
slave_dev->flags &= ~IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
}
/* enslave device <slave> to bond device <master> */
@@ -1258,7 +1236,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct slave *new_slave = NULL;
+ struct slave *new_slave = NULL, *prev_slave;
struct sockaddr addr;
int link_reporting;
int res = 0, i;
@@ -1313,7 +1291,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
bond_dev->name,
@@ -1352,7 +1330,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (slave_ops->ndo_set_mac_address == NULL) {
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1346,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
@@ -1377,7 +1355,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = -ENOMEM;
goto err_undo_flags;
}
- INIT_LIST_HEAD(&new_slave->list);
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1413,17 +1390,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = bond_master_upper_dev_link(bond_dev, slave_dev);
- if (res) {
- pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
- goto err_restore_mac;
- }
-
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unset_master;
+ goto err_restore_mac;
}
new_slave->bond = bond;
@@ -1479,21 +1450,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- write_lock_bh(&bond->lock);
-
- bond_attach_slave(bond, new_slave);
+ prev_slave = bond_last_slave(bond);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- write_unlock_bh(&bond->lock);
-
- bond_compute_features(bond);
-
bond_update_speed_duplex(new_slave);
- read_lock(&bond->lock);
-
new_slave->last_arp_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1517,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- write_lock_bh(&bond->curr_slave_lock);
-
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1528,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
bond_set_slave_inactive_flags(new_slave);
/* if this is the first slave */
- if (bond_first_slave(bond) == new_slave) {
+ if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
- struct slave *prev_slave;
-
- prev_slave = bond_prev_slave(bond, new_slave);
SLAVE_AD_INFO(new_slave).id =
SLAVE_AD_INFO(prev_slave).id + 1;
}
@@ -1588,7 +1545,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave);
- bond_select_active_slave(bond);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1562,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
break;
} /* switch(bond_mode) */
- write_unlock_bh(&bond->curr_slave_lock);
-
- bond_set_carrier(bond);
-
#ifdef CONFIG_NET_POLL_CONTROLLER
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
@@ -1624,17 +1576,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
#endif
- read_unlock(&bond->lock);
-
- res = bond_create_slave_symlinks(bond_dev, slave_dev);
- if (res)
- goto err_detach;
-
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
pr_debug("Error %d calling netdev_rx_handler_register\n", res);
- goto err_dest_symlinks;
+ goto err_detach;
+ }
+
+ res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+ if (res) {
+ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+ goto err_unregister;
+ }
+
+ bond->slave_cnt++;
+ bond_compute_features(bond);
+ bond_set_carrier(bond);
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1610,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
-err_dest_symlinks:
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+ netdev_rx_handler_unregister(slave_dev);
err_detach:
if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1619,6 @@ err_detach:
vlan_vids_del_by_dev(slave_dev, bond_dev);
write_lock_bh(&bond->lock);
- bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1638,6 @@ err_close:
slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
-err_unset_master:
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
err_restore_mac:
if (!bond->params.fail_over_mac) {
/* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1656,8 @@ err_free:
kfree(new_slave);
err_undo_flags:
- bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
- if (list_empty(&bond->slave_list) &&
+ if (!bond_has_slaves(bond) &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
@@ -1749,6 +1708,11 @@ static int __bond_release_one(struct net_device *bond_dev,
}
write_unlock_bh(&bond->lock);
+
+ /* release the slave from its bond */
+ bond->slave_cnt--;
+
+ bond_upper_dev_unlink(bond_dev, slave_dev);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
@@ -1772,12 +1736,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- /* release the slave from its bond */
- bond_detach_slave(bond, slave);
-
if (!all && !bond->params.fail_over_mac) {
if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
- !list_empty(&bond->slave_list))
+ bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
bond_dev->name, slave_dev->name,
slave->perm_hwaddr,
@@ -1820,7 +1781,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
}
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
@@ -1836,7 +1797,7 @@ static int __bond_release_one(struct net_device *bond_dev,
unblock_netpoll_tx();
synchronize_rcu();
- if (list_empty(&bond->slave_list)) {
+ if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
@@ -1848,8 +1809,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode USES_PRIMARY, then this case was handled above by
@@ -1873,8 +1832,6 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_flush(bond_dev, slave_dev);
}
- bond_upper_dev_unlink(bond_dev, slave_dev);
-
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
@@ -1913,7 +1870,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = bond_release(bond_dev, slave_dev);
- if (ret == 0 && list_empty(&bond->slave_list)) {
+ if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
@@ -1922,61 +1879,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
return ret;
}
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- * - <slave_dev> is not found in the list.
- * - There is not active slave now.
- * - <slave_dev> is already active.
- * - The link state of <slave_dev> is not BOND_LINK_UP.
- * - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
- int res = 0;
-
- if (!USES_PRIMARY(bond->params.mode))
- return -EINVAL;
-
- /* Verify that bond_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) ||
- !netdev_has_upper_dev(slave_dev, bond_dev))
- return -EINVAL;
-
- read_lock(&bond->lock);
-
- old_active = bond->curr_active_slave;
- new_active = bond_get_slave_by_dev(bond, slave_dev);
- /*
- * Changing to the current active: do nothing; return success.
- */
- if (new_active && new_active == old_active) {
- read_unlock(&bond->lock);
- return 0;
- }
-
- if (new_active &&
- old_active &&
- new_active->link == BOND_LINK_UP &&
- IS_UP(new_active->dev)) {
- block_netpoll_tx();
- write_lock_bh(&bond->curr_slave_lock);
- bond_change_active_slave(bond, new_active);
- write_unlock_bh(&bond->curr_slave_lock);
- unblock_netpoll_tx();
- } else
- res = -EINVAL;
-
- read_unlock(&bond->lock);
-
- return res;
-}
-
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1896,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
int i = 0, res = -ENODEV;
struct slave *slave;
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
@@ -2019,12 +1922,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
static int bond_miimon_inspect(struct bonding *bond)
{
int link_state, commit = 0;
+ struct list_head *iter;
struct slave *slave;
bool ignore_updelay;
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2022,10 @@ static int bond_miimon_inspect(struct bonding *bond)
static void bond_miimon_commit(struct bonding *bond)
{
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2225,7 +2130,7 @@ void bond_mii_monitor(struct work_struct *work)
delay = msecs_to_jiffies(bond->params.miimon);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2179,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return true;
rcu_read_lock();
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (ip == bond_confirm_addr(upper, 0, ip)) {
ret = true;
break;
@@ -2349,10 +2254,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
*
* TODO: QinQ?
*/
- netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+ vlan_iter) {
if (!is_vlan_dev(vlan_upper))
continue;
- netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+ iter) {
if (upper == rt->dst.dev) {
vlan_id = vlan_dev_vlan_id(vlan_upper);
rcu_read_unlock();
@@ -2365,7 +2272,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* our upper vlans, then just search for any dev that
* matches, and in case it's a vlan - save the id
*/
- netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper == rt->dst.dev) {
/* if it's a vlan - get its VID */
if (is_vlan_dev(upper))
@@ -2512,11 +2419,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
struct slave *slave, *oldcurrent;
+ struct list_head *iter;
int do_failover = 0;
read_lock(&bond->lock);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2436,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2527,11 @@ re_arm:
static int bond_ab_arp_inspect(struct bonding *bond)
{
unsigned long trans_start, last_rx;
+ struct list_head *iter;
struct slave *slave;
int commit = 0;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
last_rx = slave_last_rx(bond, slave);
@@ -2689,9 +2598,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
static void bond_ab_arp_commit(struct bonding *bond)
{
unsigned long trans_start;
+ struct list_head *iter;
struct slave *slave;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2762,8 +2672,9 @@ do_failover:
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave, *next_slave;
- int i;
+ struct slave *slave, *before = NULL, *new_slave = NULL;
+ struct list_head *iter;
+ bool found = false;
read_lock(&bond->curr_slave_lock);
@@ -2793,18 +2704,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(bond->current_arp_slave);
- /* search for next candidate */
- next_slave = bond_next_slave(bond, bond->current_arp_slave);
- bond_for_each_slave_from(bond, slave, i, next_slave) {
- if (IS_UP(slave->dev)) {
- slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(slave);
- bond_arp_send_all(bond, slave);
- slave->jiffies = jiffies;
- bond->current_arp_slave = slave;
- break;
- }
+ bond_for_each_slave(bond, slave, iter) {
+ if (!found && !before && IS_UP(slave->dev))
+ before = slave;
+ if (found && !new_slave && IS_UP(slave->dev))
+ new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
* simultaneous link failures and
@@ -2812,7 +2717,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
* one the current slave so it is still marked
* up when it is actually down
*/
- if (slave->link == BOND_LINK_UP) {
+ if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2822,7 +2727,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
}
+ if (slave == bond->current_arp_slave)
+ found = true;
}
+
+ if (!new_slave && before)
+ new_slave = before;
+
+ if (!new_slave)
+ return;
+
+ new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(new_slave);
+ bond_arp_send_all(bond, new_slave);
+ new_slave->jiffies = jiffies;
+ bond->current_arp_slave = new_slave;
+
}
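The probe loop above folds the old bond_for_each_slave_from() into a single pass
that remembers two candidates: "before", the first usable slave seen ahead of
current_arp_slave, and "new_slave", the first usable one seen after it, which
together give "next usable slave, wrapping around". A stand-alone sketch of that
idiom over an array (plain C, illustrative names):

#include <stdio.h>
#include <stdbool.h>

/* first "up" entry after cur, wrapping to the front if none follows */
static int next_up(const bool up[], int n, int cur)
{
        int before = -1, after = -1;
        bool found = false;
        int i;

        for (i = 0; i < n; i++) {
                if (!found && before < 0 && up[i])
                        before = i;
                if (found && after < 0 && up[i])
                        after = i;
                if (i == cur)
                        found = true;
        }
        return after >= 0 ? after : before;
}

int main(void)
{
        bool up[] = { true, false, true, false };

        printf("%d\n", next_up(up, 4, 2));       /* wraps around to 0 */
        return 0;
}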
void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2836,7 +2756,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (list_empty(&bond->slave_list))
+ if (!bond_has_slaves(bond))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2953,85 @@ static struct notifier_block bond_netdev_notifier = {
/*---------------------------- Hashing Policies -----------------------------*/
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
{
struct ethhdr *data = (struct ethhdr *)skb->data;
if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
- return (data->h_dest[5] ^ data->h_source[5]) % count;
+ return data->h_dest[5] ^ data->h_source[5];
return 0;
}
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+ struct flow_keys *fk)
{
- const struct ethhdr *data;
+ const struct ipv6hdr *iph6;
const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- u32 v6hash;
- const __be32 *s, *d;
+ int noff, proto = -1;
+
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+ return skb_flow_dissect(skb, fk);
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_network_may_pull(skb, sizeof(*iph))) {
+ fk->ports = 0;
+ noff = skb_network_offset(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+ return false;
iph = ip_hdr(skb);
- data = (struct ethhdr *)skb->data;
- return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
- (data->h_dest[5] ^ data->h_source[5])) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_network_may_pull(skb, sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- data = (struct ethhdr *)skb->data;
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
- return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
- }
-
- return bond_xmit_hash_policy_l2(skb, count);
+ fk->src = iph->saddr;
+ fk->dst = iph->daddr;
+ noff += iph->ihl << 2;
+ if (!ip_is_fragment(iph))
+ proto = iph->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+ return false;
+ iph6 = ipv6_hdr(skb);
+ fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+ fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ noff += sizeof(*iph6);
+ proto = iph6->nexthdr;
+ } else {
+ return false;
+ }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+ fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+ return true;
}
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ * which will be reduced modulo count before returning.
*/
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
{
- u32 layer4_xor = 0;
- const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- const __be32 *s, *d;
- const __be16 *l4 = NULL;
- __be16 _l4[2];
- int noff = skb_network_offset(skb);
- int poff;
-
- if (skb->protocol == htons(ETH_P_IP) &&
- pskb_may_pull(skb, noff + sizeof(*iph))) {
- iph = ip_hdr(skb);
- poff = proto_ports_offset(iph->protocol);
+ struct flow_keys flow;
+ u32 hash;
- if (!ip_is_fragment(iph) && poff >= 0) {
- l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- return (layer4_xor ^
- ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
- } else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
- ipv6h = ipv6_hdr(skb);
- poff = proto_ports_offset(ipv6h->nexthdr);
- if (poff >= 0) {
- l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
- }
- s = &ipv6h->saddr.s6_addr32[0];
- d = &ipv6h->daddr.s6_addr32[0];
- layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
- layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
- (layer4_xor >> 8);
- return layer4_xor % count;
- }
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect(bond, skb, &flow))
+ return bond_eth_hash(skb) % count;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ hash = bond_eth_hash(skb);
+ else
+ hash = (__force u32)flow.ports;
+ hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
- return bond_xmit_hash_policy_l2(skb, count);
+ return hash % count;
}
/*-------------------------- Device entry points ----------------------------*/
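For reference, the reduction in bond_xmit_hash() above is a plain xor-fold: start
from the L4 ports (or the L2 byte hash for the 2+3 policies), xor in the L3
source/destination words, fold the upper bits down, and take the result modulo
the slave count. A stand-alone sketch of that arithmetic (plain C; the flow
fields here are illustrative, not struct flow_keys):

#include <stdio.h>
#include <stdint.h>

static uint32_t fold(uint32_t ports, uint32_t src, uint32_t dst, int count)
{
        uint32_t hash = ports;

        hash ^= dst ^ src;       /* mix the addresses in */
        hash ^= hash >> 16;      /* fold the high halfword down */
        hash ^= hash >> 8;       /* again, so the low bits see every byte */
        return hash % count;     /* slave index */
}

int main(void)
{
        /* e.g. 10.0.0.1:1025 -> 10.0.0.2:80 over a 4-slave bond */
        printf("%u\n", fold((1025u << 16) | 80u, 0x0a000001, 0x0a000002, 4));
        return 0;
}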
@@ -3155,13 +3061,14 @@ static void bond_work_cancel_all(struct bonding *bond)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3128,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
+ struct list_head *iter;
struct slave *slave;
memset(stats, 0, sizeof(*stats));
read_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct rtnl_link_stats64 *sstats =
dev_get_stats(slave->dev, &temp);
@@ -3263,6 +3171,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = NULL;
struct ifbond k_binfo;
struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3202,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (mii->reg_num == 1) {
- struct bonding *bond = netdev_priv(bond_dev);
mii->val_out = 0;
read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3273,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
- res = bond_ioctl_change_active(bond_dev, slave_dev);
+ res = bond_option_active_slave_set(bond, slave_dev);
break;
default:
res = -EOPNOTSUPP;
@@ -3393,22 +3301,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct list_head *iter;
struct slave *slave;
- ASSERT_RTNL();
+ rcu_read_lock();
if (USES_PRIMARY(bond->params.mode)) {
- slave = rtnl_dereference(bond->curr_active_slave);
+ slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
} else {
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
+ rcu_read_unlock();
}
static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3381,8 @@ static int bond_neigh_setup(struct net_device *dev,
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *rollback_slave;
+ struct list_head *iter;
int res = 0;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3403,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
- pr_debug("s %p s->p %p c_m %p\n",
+ bond_for_each_slave(bond, slave, iter) {
+ pr_debug("s %p c_m %p\n",
slave,
- bond_prev_slave(bond, slave),
slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3430,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
unwind:
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
@@ -3543,8 +3456,9 @@ unwind:
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave, *rollback_slave;
struct sockaddr *sa = addr, tmp_sa;
- struct slave *slave;
+ struct list_head *iter;
int res = 0;
if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3492,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
@@ -3610,13 +3524,16 @@ unwind:
tmp_sa.sa_family = bond_dev->type;
/* unwind from head to the slave that failed */
- bond_for_each_slave_continue_reverse(bond, slave) {
+ bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
- tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+ if (rollback_slave == slave)
+ break;
+
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
if (tmp_res) {
pr_debug("unwind err %d dev %s\n",
- tmp_res, slave->dev->name);
+ tmp_res, rollback_slave->dev->name);
}
}
@@ -3635,11 +3552,12 @@ unwind:
*/
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
{
+ struct list_head *iter;
struct slave *slave;
int i = slave_id;
/* Here we start from the slave with slave_id */
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
if (slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3568,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
/* Here we start from the first slave up to slave_id */
i = slave_id;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
if (slave_can_tx(slave)) {
@@ -3662,14 +3580,44 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
kfree_skb(skb);
}
+/**
+ * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
+ * @bond: bonding device to use
+ *
+ * Based on the value of the bonding device's packets_per_slave parameter
+ * this function generates a slave id, which is usually used as the next
+ * slave to transmit through.
+ */
+static u32 bond_rr_gen_slave_id(struct bonding *bond)
+{
+ int packets_per_slave = bond->params.packets_per_slave;
+ u32 slave_id;
+
+ switch (packets_per_slave) {
+ case 0:
+ slave_id = prandom_u32();
+ break;
+ case 1:
+ slave_id = bond->rr_tx_counter;
+ break;
+ default:
+ slave_id = reciprocal_divide(bond->rr_tx_counter,
+ packets_per_slave);
+ break;
+ }
+ bond->rr_tx_counter++;
+
+ return slave_id;
+}
+
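The id generation above leans on bond_check_params() storing
reciprocal_value(packets_per_slave) in the params (see the hunk further down), so
reciprocal_divide() can turn the per-bond tx counter into
"counter / packets_per_slave" without a division on the hot path;
bond_xmit_roundrobin() then reduces that modulo the slave count. A self-contained
sketch of the reciprocal arithmetic this assumes, modeled on the pre-3.14
linux/reciprocal_div.h (plain userspace C, not the kernel headers):

#include <stdio.h>
#include <stdint.h>

/* roughly ceil(2^32 / k), what the old reciprocal_value() computed */
static uint32_t recip_value(uint32_t k)
{
        return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

/* (a * R) >> 32, the old reciprocal_divide() */
static uint32_t recip_divide(uint32_t a, uint32_t r)
{
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t r = recip_value(3);             /* packets_per_slave = 3 */
        uint32_t counter, slaves = 2;

        for (counter = 0; counter < 8; counter++)
                printf("pkt %u -> slave %u\n",
                       counter, recip_divide(counter, r) % slaves);
        /* prints slave 0 0 0 1 1 1 0 0: three packets in a row per slave */
        return 0;
}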
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ u32 slave_id;
- /*
- * Start with the curr_active_slave that joined the bond as the
+ /* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
* needs to maintain some consistency for the interface that will
* send the join/membership reports. The curr_active_slave found
@@ -3682,8 +3630,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- bond_xmit_slave_id(bond, skb,
- bond->rr_tx_counter++ % bond->slave_cnt);
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
}
return NETDEV_TX_OK;
@@ -3707,8 +3655,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
return NETDEV_TX_OK;
}
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor(), we determine the output device by using a pre-
* determined xmit_hash_policy(). If the selected device is not enabled,
* find the next active slave.
*/
@@ -3716,8 +3663,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb,
- bond->xmit_hash_policy(skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
return NETDEV_TX_OK;
}
@@ -3727,8 +3673,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
+ struct list_head *iter;
- bond_for_each_slave_rcu(bond, slave) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_is_last_slave(bond, slave))
break;
if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3700,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
/*------------------------- Device initialization ---------------------------*/
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
- switch (bond->params.xmit_policy) {
- case BOND_XMIT_POLICY_LAYER23:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
- break;
- case BOND_XMIT_POLICY_LAYER34:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
- break;
- case BOND_XMIT_POLICY_LAYER2:
- default:
- bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
- break;
- }
-}
-
/*
* Lookup the slave that corresponds to a qid
*/
@@ -3777,13 +3708,14 @@ static inline int bond_slave_override(struct bonding *bond,
{
struct slave *slave = NULL;
struct slave *check_slave;
+ struct list_head *iter;
int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, check_slave) {
+ bond_for_each_slave_rcu(bond, check_slave, iter) {
if (check_slave->queue_id == skb->queue_mapping) {
slave = check_slave;
break;
@@ -3869,7 +3801,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
rcu_read_lock();
- if (!list_empty(&bond->slave_list))
+ if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
kfree_skb(skb);
@@ -3878,43 +3810,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
- struct net_device *bond_dev = bond->dev;
-
- switch (mode) {
- case BOND_MODE_ROUNDROBIN:
- break;
- case BOND_MODE_ACTIVEBACKUP:
- break;
- case BOND_MODE_XOR:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_BROADCAST:
- break;
- case BOND_MODE_8023AD:
- bond_set_xmit_hash_policy(bond);
- break;
- case BOND_MODE_ALB:
- /* FALLTHRU */
- case BOND_MODE_TLB:
- break;
- default:
- /* Should never happen, mode already checked */
- pr_err("%s: Error: Unknown bonding mode %d\n",
- bond_dev->name, mode);
- break;
- }
-}
-
static int bond_ethtool_get_settings(struct net_device *bond_dev,
struct ethtool_cmd *ecmd)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned long speed = 0;
+ struct list_head *iter;
struct slave *slave;
ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3827,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
* this is an accurate maximum.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (SLAVE_IS_OK(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
@@ -3994,14 +3895,13 @@ static void bond_destructor(struct net_device *bond_dev)
free_netdev(bond_dev);
}
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
- INIT_LIST_HEAD(&bond->slave_list);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4011,7 +3911,6 @@ static void bond_setup(struct net_device *bond_dev)
ether_setup(bond_dev);
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
- bond_set_mode_ops(bond, bond->params.mode);
bond_dev->destructor = bond_destructor;
@@ -4057,12 +3956,13 @@ static void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *tmp_slave;
+ struct list_head *iter;
+ struct slave *slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+ bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
pr_info("%s: released all slaves\n", bond_dev->name);
@@ -4235,6 +4135,12 @@ static int bond_check_params(struct bond_params *params)
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
+ if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+ pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
+ packets_per_slave, USHRT_MAX);
+ packets_per_slave = 1;
+ }
+
/* reset values for TLB/ALB */
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
@@ -4424,7 +4330,10 @@ static int bond_check_params(struct bond_params *params)
params->resend_igmp = resend_igmp;
params->min_links = min_links;
params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
-
+ if (packets_per_slave > 1)
+ params->packets_per_slave = reciprocal_value(packets_per_slave);
+ else
+ params->packets_per_slave = packets_per_slave;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
params->primary[IFNAMSIZ - 1] = 0;
@@ -4495,32 +4404,11 @@ static int bond_init(struct net_device *bond_dev)
return 0;
}
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
- return -EINVAL;
- if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
- return -EADDRNOTAVAIL;
- }
- return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
- .validate = bond_validate,
- .get_num_tx_queues = bond_get_num_tx_queues,
- .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
- as for TX queues */
-};
-
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4495,7 @@ static int __init bonding_init(void)
if (res)
goto out;
- res = rtnl_link_register(&bond_link_ops);
+ res = bond_netlink_init();
if (res)
goto err_link;
@@ -4623,7 +4511,7 @@ static int __init bonding_init(void)
out:
return res;
err:
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
goto out;
@@ -4636,7 +4524,7 @@ static void __exit bonding_exit(void)
bond_destroy_debugfs();
- rtnl_link_unregister(&bond_link_ops);
+ bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4541,3 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644
index 000000000000..40e7b1cb4aea
--- /dev/null
+++ b/drivers/net/bonding/bond_netlink.c
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bonding/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ [IFLA_BOND_MODE] = { .type = NLA_U8 },
+ [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int err;
+
+ if (data && data[IFLA_BOND_MODE]) {
+ int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+ err = bond_option_mode_set(bond, mode);
+ if (err)
+ return err;
+ }
+ if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+ int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+ struct net_device *slave_dev;
+
+ if (ifindex == 0) {
+ slave_dev = NULL;
+ } else {
+ slave_dev = __dev_get_by_index(dev_net(bond_dev),
+ ifindex);
+ if (!slave_dev)
+ return -ENODEV;
+ }
+ err = bond_option_active_slave_set(bond, slave_dev);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ err = bond_changelink(bond_dev, tb, data);
+ if (err < 0)
+ return err;
+
+ return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+ return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
+ nla_total_size(sizeof(u32)); /* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+ const struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+ if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+ (slave_dev &&
+ nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+ .maxtype = IFLA_BOND_MAX,
+ .policy = bond_policy,
+ .validate = bond_validate,
+ .newlink = bond_newlink,
+ .changelink = bond_changelink,
+ .get_size = bond_get_size,
+ .fill_info = bond_fill_info,
+ .get_num_tx_queues = bond_get_num_tx_queues,
+ .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
+ as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+ return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+ rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644
index 000000000000..9a5223c7b4d1
--- /dev/null
+++ b/drivers/net/bonding/bond_options.c
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bonding/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+ int i;
+
+ for (i = 0; bond_mode_tbl[i].modename; i++);
+
+ return mode >= 0 && mode < i;
+}
+
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+ if (!bond_mode_is_valid(mode)) {
+ pr_err("invalid mode value %d.\n", mode);
+ return -EINVAL;
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ pr_err("%s: unable to update mode because interface is up.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (bond_has_slaves(bond)) {
+ pr_err("%s: unable to update mode because bond has slaves.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+ pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+ bond->dev->name, bond_mode_tbl[mode].modename);
+ return -EINVAL;
+ }
+
+ /* don't cache arp_validate between modes */
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ bond->params.mode = mode;
+ return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+ struct slave *slave)
+{
+ return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+ struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+ return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+ return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+ struct net_device *slave_dev)
+{
+ int ret = 0;
+
+ if (slave_dev) {
+ if (!netif_is_bond_slave(slave_dev)) {
+ pr_err("Device %s is not bonding slave.\n",
+ slave_dev->name);
+ return -EINVAL;
+ }
+
+ if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+ pr_err("%s: Device %s is not our slave.\n",
+ bond->dev->name, slave_dev->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!USES_PRIMARY(bond->params.mode)) {
+ pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
+ return -EINVAL;
+ }
+
+ block_netpoll_tx();
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+
+ /* check to see if we are clearing active */
+ if (!slave_dev) {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
+ bond_select_active_slave(bond);
+ } else {
+ struct slave *old_active = bond->curr_active_slave;
+ struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+ BUG_ON(!new_active);
+
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ } else {
+ if (old_active && (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active slave.\n",
+ bond->dev->name, new_active->dev->name);
+ bond_change_active_slave(bond, new_active);
+ } else {
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ bond->dev->name, new_active->dev->name,
+ new_active->dev->name);
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ unblock_netpoll_tx();
+ return ret;
+}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 20a6ee25bb63..fb868d6c22da 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(&bond->lock)
{
struct bonding *bond = seq->private;
- loff_t off = 0;
+ struct list_head *iter;
struct slave *slave;
+ loff_t off = 0;
/* make sure the bond won't be taken away */
rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
if (++off == *pos)
return slave;
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = seq->private;
- struct slave *slave = v;
+ struct list_head *iter;
+ struct slave *slave;
+ bool found = false;
++*pos;
if (v == SEQ_START_TOKEN)
return bond_first_slave(bond);
- if (bond_is_last_slave(bond, slave))
+ if (bond_is_last_slave(bond, v))
return NULL;
- slave = bond_next_slave(bond, slave);
- return slave;
+ bond_for_each_slave(bond, slave, iter) {
+ if (found)
+ return slave;
+ if (slave == v)
+ found = true;
+ }
+
+ return NULL;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ec9b6460a38d..bc8fd362a5aa 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -40,6 +40,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
+#include <linux/reciprocal_div.h>
#include "bonding.h"
@@ -159,41 +160,6 @@ static const struct class_attribute class_attr_bonding_masters = {
.store = bonding_store_bonds,
};
-int bond_create_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
- int ret = 0;
-
- /* first, create a link from the slave back to the master */
- ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
- "master");
- if (ret)
- return ret;
- /* next, create a link from the master to the slave */
- sprintf(linkname, "slave_%s", slave->name);
- ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
- linkname);
-
- /* free the master link created earlier in case of error */
- if (ret)
- sysfs_remove_link(&(slave->dev.kobj), "master");
-
- return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
- struct net_device *slave)
-{
- char linkname[IFNAMSIZ+7];
-
- sysfs_remove_link(&(slave->dev.kobj), "master");
- sprintf(linkname, "slave_%s", slave->name);
- sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
/*
* Show the slaves in the current bond.
*/
@@ -201,11 +167,14 @@ static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -215,7 +184,9 @@ static ssize_t bonding_show_slaves(struct device *d,
}
res += sprintf(buf + res, "%s ", slave->dev->name);
}
- read_unlock(&bond->lock);
+
+ rtnl_unlock();
+
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -304,50 +275,26 @@ static ssize_t bonding_store_mode(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
+ int new_value, ret;
struct bonding *bond = to_bond(d);
- if (!rtnl_trylock())
- return restart_syscall();
-
- if (bond->dev->flags & IFF_UP) {
- pr_err("unable to update mode of %s because interface is up.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
- if (!list_empty(&bond->slave_list)) {
- pr_err("unable to update mode of %s because it has slaves.\n",
- bond->dev->name);
- ret = -EPERM;
- goto out;
- }
-
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid mode value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if ((new_value == BOND_MODE_ALB ||
- new_value == BOND_MODE_TLB) &&
- bond->params.arp_interval) {
- pr_err("%s: %s mode is incompatible with arp monitoring.\n",
- bond->dev->name, bond_mode_tbl[new_value].modename);
- ret = -EINVAL;
- goto out;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ ret = bond_option_mode_set(bond, new_value);
+ if (!ret) {
+ pr_info("%s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
+ new_value);
+ ret = count;
}
- /* don't cache arp_validate between modes */
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- bond->params.mode = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
- pr_info("%s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
- new_value);
-out:
rtnl_unlock();
return ret;
}
@@ -383,7 +330,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
ret = -EINVAL;
} else {
bond->params.xmit_policy = new_value;
- bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting xmit hash policy to %s (%d).\n",
bond->dev->name,
xmit_hashtype_tbl[new_value].modename, new_value);
@@ -513,7 +459,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
- if (!list_empty(&bond->slave_list)) {
+ if (bond_has_slaves(bond)) {
pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
ret = -EPERM;
@@ -647,11 +593,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
__be32 newtarget, *targets;
unsigned long *targets_rx;
int ind, i, j, ret = -EINVAL;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
targets = bond->params.arp_targets;
newtarget = in_aton(buf + 1);
/* look for adds */
@@ -679,7 +629,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave)
+ bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[ind] = jiffies;
targets[ind] = newtarget;
write_unlock_bh(&bond->lock);
@@ -705,7 +655,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
j = ind;
for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -725,6 +675,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
ret = count;
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1102,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
const char *buf, size_t count)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
char ifname[IFNAMSIZ];
struct slave *slave;
@@ -1129,7 +1081,7 @@ static ssize_t bonding_store_primary(struct device *d,
goto out;
}
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
pr_info("%s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
@@ -1259,13 +1211,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct slave *curr;
+ struct net_device *slave_dev;
int count = 0;
rcu_read_lock();
- curr = rcu_dereference(bond->curr_active_slave);
- if (USES_PRIMARY(bond->params.mode) && curr)
- count = sprintf(buf, "%s\n", curr->dev->name);
+ slave_dev = bond_option_active_slave_get_rcu(bond);
+ if (slave_dev)
+ count = sprintf(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -1275,80 +1227,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct slave *slave, *old_active, *new_active;
+ int ret;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
+ struct net_device *dev;
if (!rtnl_trylock())
return restart_syscall();
- old_active = new_active = NULL;
- block_netpoll_tx();
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
- if (!USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: Unable to change active slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
- goto out;
- }
-
sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing active */
if (!strlen(ifname) || buf[0] == '\n') {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
- rcu_assign_pointer(bond->curr_active_slave, NULL);
- bond_select_active_slave(bond);
- goto out;
- }
-
- bond_for_each_slave(bond, slave) {
- if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current"
- " active slave.\n",
- bond->dev->name,
- slave->dev->name);
- goto out;
- } else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active"
- " slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond,
- new_active);
- } else {
- pr_info("%s: Could not set %s as"
- " active slave; either %s is"
- " down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
- }
+ dev = NULL;
+ } else {
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
}
}
- pr_info("%s: Unable to set %.*s as active slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- unblock_netpoll_tx();
+ ret = bond_option_active_slave_set(bond, dev);
+ if (!ret)
+ ret = count;
+ out:
rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1484,14 +1389,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
struct slave *slave;
int res = 0;
if (!rtnl_trylock())
return restart_syscall();
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -1502,9 +1407,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
res += sprintf(buf + res, "%s:%d ",
slave->dev->name, slave->queue_id);
}
- read_unlock(&bond->lock);
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
rtnl_unlock();
return res;
@@ -1520,6 +1425,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
{
struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
+ struct list_head *iter;
u16 qid;
int ret = count;
char *delim;
@@ -1552,11 +1458,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
if (!sdev)
goto err_no_cmd;
- read_lock(&bond->lock);
-
	/* Search for the slave and check for duplicate qids */
update_slave = NULL;
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (sdev == slave->dev)
/*
* We don't need to check the matching
@@ -1564,23 +1468,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
*/
update_slave = slave;
else if (qid && qid == slave->queue_id) {
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
}
}
if (!update_slave)
- goto err_no_cmd_unlock;
+ goto err_no_cmd;
/* Actually set the qids for the slave */
update_slave->queue_id = qid;
- read_unlock(&bond->lock);
out:
rtnl_unlock();
return ret;
-err_no_cmd_unlock:
- read_unlock(&bond->lock);
err_no_cmd:
pr_info("invalid input for queue_id set for %s.\n",
bond->dev->name);
@@ -1610,8 +1511,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
int new_value, ret = count;
+ struct list_head *iter;
struct slave *slave;
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (sscanf(buf, "%d", &new_value) != 1) {
pr_err("%s: no all_slaves_active value specified.\n",
bond->dev->name);
@@ -1631,8 +1536,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
goto out;
}
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 0;
@@ -1640,8 +1544,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
slave->inactive = 1;
}
}
- read_unlock(&bond->lock);
out:
+ rtnl_unlock();
return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1728,6 +1632,53 @@ out:
static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
bonding_show_lp_interval, bonding_store_lp_interval);
+static ssize_t bonding_show_packets_per_slave(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+ int packets_per_slave = bond->params.packets_per_slave;
+
+ if (packets_per_slave > 1)
+ packets_per_slave = reciprocal_value(packets_per_slave);
+
+ return sprintf(buf, "%d\n", packets_per_slave);
+}
+
+static ssize_t bonding_store_packets_per_slave(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no packets_per_slave value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (new_value < 0 || new_value > USHRT_MAX) {
+ pr_err("%s: packets_per_slave must be between 0 and %u\n",
+ bond->dev->name, USHRT_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (bond->params.mode != BOND_MODE_ROUNDROBIN)
+ pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
+ bond->dev->name);
+ if (new_value > 1)
+ bond->params.packets_per_slave = reciprocal_value(new_value);
+ else
+ bond->params.packets_per_slave = new_value;
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
+ bonding_show_packets_per_slave,
+ bonding_store_packets_per_slave);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -1759,6 +1710,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
+ &dev_attr_packets_per_slave.attr,
NULL,
};
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd14490..77a07a12e77f 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -58,6 +58,11 @@
#define TX_QUEUE_OVERRIDE(mode) \
(((mode) == BOND_MODE_ACTIVEBACKUP) || \
((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode) \
+ (((mode) == BOND_MODE_TLB) || \
+ ((mode) == BOND_MODE_ALB))
+
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -72,63 +77,37 @@
res; })
/* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
#define bond_first_slave(bond) \
- list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+ NULL)
#define bond_last_slave(bond) \
- (list_empty(&(bond)->slave_list) ? NULL : \
- bond_to_slave((bond)->slave_list.prev))
+ (bond_has_slaves(bond) ? \
+ netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+ NULL)
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
- (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
- bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
- (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
- bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
- * @start: starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
- for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
- cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
/**
* bond_for_each_slave - iterate over all slaves
* @bond: the bond holding this list
* @pos: current slave
+ * @iter: list_head * iterator
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave(bond, pos) \
- list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+ netdev_for_each_lower_private((bond)->dev, pos, iter)
/* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
- list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond: the bond holding this list
- * @pos: slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
- list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -177,6 +156,7 @@ struct bond_params {
int all_slaves_active;
int resend_igmp;
int lp_interval;
+ int packets_per_slave;
};
struct bond_parm_tbl {
@@ -188,7 +168,6 @@ struct bond_parm_tbl {
struct slave {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head list;
struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
@@ -228,7 +207,6 @@ struct slave {
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
- struct list_head slave_list;
struct slave *curr_active_slave;
struct slave *current_arp_slave;
struct slave *primary_slave;
@@ -245,8 +223,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- int (*xmit_hash_policy)(struct sk_buff *, int);
- u16 rr_tx_counter;
+ u32 rr_tx_counter;
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
@@ -276,13 +253,7 @@ struct bonding {
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
- struct slave *slave = NULL;
-
- bond_for_each_slave(bond, slave)
- if (slave->dev == slave_dev)
- return slave;
-
- return NULL;
+ return netdev_lower_dev_get_private(bond->dev, slave_dev);
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +265,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
static inline bool bond_is_lb(const struct bonding *bond)
{
- return (bond->params.mode == BOND_MODE_TLB ||
- bond->params.mode == BOND_MODE_ALB);
+ return BOND_MODE_IS_LB(bond->params.mode);
}
static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +402,18 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
void bond_mii_monitor(struct work_struct *);
void bond_loadbalance_arp_mon(struct work_struct *);
void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +423,14 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -492,9 +467,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
const u8 *mac)
{
+ struct list_head *iter;
struct slave *tmp;
- bond_for_each_slave(bond, tmp)
+ bond_for_each_slave(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+ const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
@@ -528,4 +518,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
extern const struct bond_parm_tbl pri_reselect_tbl[];
extern struct bond_parm_tbl ad_select_tbl[];
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
#endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 693d8ffe4653..cf0f63e14e53 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
priv->clk = clk;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a2700d25ff0e..8a0b515b33ea 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
unsigned short *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7891a2..bce0be54c2f5 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
return 0;
out_free_c_can:
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
out_iounmap:
pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
unregister_c_can_dev(dev);
- pci_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
pci_iounmap(pdev, priv->base);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 294ced3cc227..d66ac265269c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(c_can_of_table),
+ .of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
.remove = c_can_plat_remove,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 034bdd816a60..ad76734b3ecc 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
- struct cc770_platform_data *pdata = pdev->dev.platform_data;
+ struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
priv->can.clock.freq = pdata->osc_freq;
if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
err = cc770_get_of_node_data(pdev, priv);
- else if (pdev->dev.platform_data)
+ else if (dev_get_platdata(&pdev->dev))
err = cc770_get_platform_data(pdev, priv);
else
err = -ENODEV;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1870c4731a57..bda1888cae9a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -645,19 +645,6 @@ static int can_changelink(struct net_device *dev,
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
- return -EOPNOTSUPP;
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
- }
-
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -680,6 +667,19 @@ static int can_changelink(struct net_device *dev,
}
}
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= cm->flags;
+ }
+
if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
@@ -702,17 +702,17 @@ static int can_changelink(struct net_device *dev,
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- size_t size;
-
- size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+ size_t size = 0;
+
+ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += nla_total_size(sizeof(struct can_berr_counter));
return size;
}
@@ -726,23 +726,20 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
- nla_put(skb, IFLA_CAN_BITTIMING,
+ if (nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming) ||
+ (priv->bittiming_const &&
+ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
(priv->do_get_berr_counter &&
!priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)))
- goto nla_put_failure;
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+ return -EMSGSIZE;
return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
}
static size_t can_get_xstats_size(const struct net_device *dev)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 8f5ce747feb5..ae08cf129ebb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1068,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
- priv->pdata = pdev->dev.platform_data;
+ priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 36bd6fa1c7f3..ab5909a7bae9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
struct device *dev;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENXIO;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index fe7dd696957e..08ac401e0214 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
{
struct net_device *net;
struct mcp251x_priv *priv;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret = -ENODEV;
if (!pdata)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 9c24d60a23b1..e98abb97a050 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -297,8 +297,8 @@ struct mscan_priv {
struct napi_struct napi;
};
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c314a961970..5f0e9b3bfa7b 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
pci_iounmap(pdev, priv->regs);
free_candev(priv->ndev);
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 3752342a678a..835921388e7b 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void ems_pci_card_reset(struct ems_pci_card *card)
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 217585b97cd3..087b13bd300e 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver kvaser_pci_driver = {
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6b6f0ad75090..065ca49eb45e 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cfg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver peak_pci_driver = {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index c52c1e96bf90..f9b4f81cd86a 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
kfree(card);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
/*
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 8e259c541036..29f9b6321187 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq;
struct sja1000_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data provided!\n");
err = -ENODEV;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index afd7d85b6915..35f062282dbd 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -71,34 +71,34 @@ struct softing {
} id;
};
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
- const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
/* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
- __iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
/* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
/*
* enable or disable irq
* only called with fw.lock locked
*/
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
/* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
/* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
- const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime);
/* SOFTING DPRAM mappings */
#define DPRAM_RX 0x0000
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 65eef1eea2e2..6cd5c01b624d 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
static int softing_pdev_probe(struct platform_device *pdev)
{
- const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
struct softing *card;
struct net_device *netdev;
struct softing_priv *priv;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 3a349a22d5bc..60d95b44d0f7 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -286,15 +286,6 @@ static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
}
-static int ti_hecc_get_state(const struct net_device *ndev,
- enum can_state *state)
-{
- struct ti_hecc_priv *priv = netdev_priv(ndev);
-
- *state = priv->can.state;
- return 0;
-}
-
static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
{
struct can_bittiming *bit_timing = &priv->can.bittiming;
@@ -894,7 +885,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
void __iomem *addr;
int err = -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform data\n");
goto probe_exit;
@@ -940,7 +931,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
- priv->can.do_get_state = ti_hecc_get_state;
priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c76377b44..65b735d4a6ad 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on (ISA || EISA) && ISA_DMA_API
+ depends on ISA && ISA_DMA_API
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
select MII
---help---
This option enables driver support for a large number of 10Mbps and
- 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+ 10/100Mbps EISA, PCI and Cardbus 3Com network cards:
"Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
"Boomerang" (EtherLink XL 3c900 or 3c905) PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6372b..465cc7108d8a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ffa1b5a..2923c51bb351 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@ extern int ei_debug;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
#endif
/* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
extern const struct net_device_ops ei_netdev_ops;
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
static inline struct net_device *alloc_ei_netdev(void)
{
return __alloc_ei_netdev(0);
}
/* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
extern const struct net_device_ops eip_netdev_ops;
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
static inline struct net_device *alloc_eip_netdev(void)
{
return __alloc_eip_netdev(0);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001551da..36fa577970bb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
}
#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 92201080e07a..fc14a85e4d5f 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
- pci_set_drvdata (pdev, NULL);
return -ENODEV;
-
}
/*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc20cfb..171d73c1d3c2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
iounmap(base);
err_out_free_res:
pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
iounmap(np->base);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev); /* Will also free np!! */
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee07906b..6dec86ac97cd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@ struct bfin_mac_local {
#endif
};
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
#endif
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b96421..ae33a99bf476 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@ struct lance_private
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
#endif
#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d5d5c2..d042511bdc13 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
err_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca523fc0..e07ce5ff2d48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+ memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
break;
case NEW_RIEBL:
- lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+ lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
break;
case PAM_CARD:
i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b495848..427c148bb643 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, 6);
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
} else {
			/* Set a random MAC since no valid address was provided by platform_data. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 1b89f1a9127e..57397295887c 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
}
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
clen = len & 1;
- rtp = tp;
- rfp = fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
* do the rest, if any.
*/
clen = len & 15;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c728436b85e..256f590f6bb1 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
int i;
if (dev->irq == 0 ||
- request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+ request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
return -EAGAIN;
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e28819779..38492e0b704e 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, 6);
+ memcpy(dev->dev_addr, promaddr, ETH_ALEN);
}
}
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
lp->init_block, lp->init_dma_addr);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b766f080..daae0e016253 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
if (skb != NULL) {
data = skb_put(skb, ETHERMINPACKET);
memset(data, 0, ETHERMINPACKET);
- memcpy(data, dev->dev_addr, 6);
- memcpy(data+6, dev->dev_addr, 6);
+ memcpy(data, dev->dev_addr, ETH_ALEN);
+ memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
bmac_transmit_packet(skb, dev);
}
spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 4ce8ceb62205..58a200df4c35 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -211,6 +211,7 @@ static int mace_probe(struct platform_device *pdev)
mp = netdev_priv(dev);
mp->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = (u32)MACE_BASE;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d0878526c0c8..b2ffad1304d2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -671,6 +671,7 @@ static int arc_emac_probe(struct platform_device *pdev)
if (!ndev)
return -ENOMEM;
+ platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -725,10 +726,10 @@ static int arc_emac_probe(struct platform_device *pdev)
/* Get MAC address from device tree */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr || !is_valid_ether_addr(mac_addr))
- eth_hw_addr_random(ndev);
- else
+ if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b235e210..5aa5e8146496 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(alx->dev);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f0556526ba9..7f9369a3b378 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092e3f1c..1cda49a28f7f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
{
int i;
- int ret = false;
+ bool ret = false;
u32 otp_ctrl_data;
u32 control;
u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e9..1b0fe2d04a0e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@ struct atl1e_adapter {
extern char atl1e_driver_name[];
extern char atl1e_driver_version[];
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
#endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444590f6..7a73f3a9fcb5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+ if (features & NETIF_F_RXALL) {
+ /* enable RX of ALL frames */
+ *mac_ctrl_data |= MAC_CTRL_DBG;
+ } else {
+ /* disable RX of ALL frames */
+ *mac_ctrl_data &= ~MAC_CTRL_DBG;
+ }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u32 mac_ctrl_data = 0;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ atl1e_irq_disable(adapter);
+ mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl1e_rx_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1e_irq_enable(adapter);
+}
+
+
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1e_vlan_mode(netdev, features);
+ if (changed & NETIF_F_RXALL)
+ atl1e_rx_mode(netdev, features);
+
+
return 0;
}
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
value |= MAC_CTRL_PROMIS_EN;
if (netdev->flags & IFF_ALLMULTI)
value |= MAC_CTRL_MC_ALL_EN;
-
+ if (netdev->features & NETIF_F_RXALL)
+ value |= MAC_CTRL_DBG;
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page_desc[que].rx_nxseq++;
/* error packet */
- if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+ if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+ !(netdev->features & NETIF_F_RXALL)) {
if (prrs->err_flag & (RRS_ERR_BAD_CRC |
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
}
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
- RRS_PKT_SIZE_MASK) - 4; /* CRC */
+ RRS_PKT_SIZE_MASK);
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ packet_size -= 4; /* CRC */
+
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL)
goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_LLTX |
NETIF_F_HW_VLAN_CTAG_TX;
-
+ /* not enabled by default */
+ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f7242b..2f27d4c4c3ad 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
#include "atlx.h"
#ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
#endif
#define PCI_COMMAND_REGISTER PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9c58e9..079a597fa20c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
static void b44_tx(struct b44 *bp)
{
u32 cur, cons;
+ unsigned bytes_compl = 0, pkts_compl = 0;
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
+
+ bytes_compl += skb->len;
+ pkts_compl++;
+
dev_kfree_skb_irq(skb);
}
+ netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
bp->tx_cons = cons;
if (netif_queue_stopped(bp->dev) &&
TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bp->flags & B44_FLAG_REORDER_BUG)
br32(bp, B44_DMATX_PTR);
+ netdev_sent_queue(dev, skb->len);
+
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
val = br32(bp, B44_ENET_CTRL);
bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+ netdev_reset_queue(bp->dev);
}
static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, 6);
+ memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 9e8a3e024e01..e2aa09ce6af7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
dma_desc->ctl0 = cpu_to_le32(ctl0);
dma_desc->ctl1 = cpu_to_le32(ctl1);
+ netdev_sent_queue(net_dev, skb->len);
+
wmb();
/* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
struct device *dma_dev = bgmac->core->dma_dev;
int empty_slot;
bool freed = false;
+ unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->skb->len, DMA_TO_DEVICE);
slot->dma_addr = 0;
+ bytes_compl += slot->skb->len;
+ pkts_compl++;
+
/* Free memory! :) */
dev_kfree_skb(slot->skb);
slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
freed = true;
}
+ netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
if (freed && netif_queue_stopped(bgmac->net_dev))
netif_wake_queue(bgmac->net_dev);
}
@@ -277,6 +285,26 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
return 0;
}
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring, int desc_idx)
+{
+ struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+ u32 ctl0 = 0, ctl1 = 0;
+
+ if (desc_idx == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
int weight)
{
@@ -295,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct sk_buff *skb = slot->skb;
- struct sk_buff *new_skb;
struct bgmac_rx_header *rx;
u16 len, flags;
@@ -308,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len = le16_to_cpu(rx->len);
flags = le16_to_cpu(rx->flags);
- /* Check for poison and drop or pass the packet */
- if (len == 0xdead && flags == 0xbeef) {
- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
- ring->start);
- } else {
+ do {
+ dma_addr_t old_dma_addr = slot->dma_addr;
+ int err;
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
+ }
+
/* Omit CRC. */
len -= ETH_FCS_LEN;
- new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
- if (new_skb) {
- skb_put(new_skb, len);
- skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
- new_skb->data,
- len);
- skb_checksum_none_assert(skb);
- new_skb->protocol =
- eth_type_trans(new_skb, bgmac->net_dev);
- netif_receive_skb(new_skb);
- handled++;
- } else {
- bgmac->net_dev->stats.rx_dropped++;
- bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ /* Prepare new skb as replacement */
+ err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+ if (err) {
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ dma_sync_single_for_device(dma_dev,
+ slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ break;
}
+ bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
- /* Poison the old skb */
- rx->len = cpu_to_le16(0xdead);
- rx->flags = cpu_to_le16(0xbeef);
- }
+ /* Unmap old skb, we'll pass it to the netif */
+ dma_unmap_single(dma_dev, old_dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
- /* Make it back accessible to the hardware */
- dma_sync_single_for_device(dma_dev, slot->dma_addr,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+ skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ netif_receive_skb(skb);
+ handled++;
+ } while (0);
if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring->start = 0;
@@ -503,8 +543,6 @@ err_dma_free:
static void bgmac_dma_init(struct bgmac *bgmac)
{
struct bgmac_dma_ring *ring;
- struct bgmac_dma_desc *dma_desc;
- u32 ctl0, ctl1;
int i;
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -537,23 +575,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
if (ring->unaligned)
bgmac_dma_rx_enable(bgmac, ring);
- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
- j++, dma_desc++) {
- ctl0 = ctl1 = 0;
-
- if (j == ring->num_slots - 1)
- ctl0 |= BGMAC_DESC_CTL0_EOT;
- ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
- /* Is there any BGMAC device that requires extension? */
- /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
- * B43_DMA64_DCTL1_ADDREXT_MASK;
- */
-
- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
- dma_desc->ctl0 = cpu_to_le32(ctl0);
- dma_desc->ctl1 = cpu_to_le32(ctl1);
- }
+ for (j = 0; j < ring->num_slots; j++)
+ bgmac_dma_rx_setup_desc(bgmac, ring, j);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
ring->index_base +
@@ -996,6 +1019,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
bgmac_miiconfig(bgmac);
bgmac_phy_init(bgmac);
+ netdev_reset_queue(bgmac->net_dev);
+
bgmac->int_status = 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f74b69..d9980ad00b4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
- memcpy(packet, bp->dev->dev_addr, 6);
- memset(packet + 6, 0x0, 8);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ memset(packet + ETH_ALEN, 0x0, 8);
for (i = 14; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
@@ -8413,7 +8413,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- memcpy(dev->dev_addr, bp->mac_addr, 6);
+ memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_free:
free_netdev(dev);
return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c5e375ddd6c0..4e01c57d8c8d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1546,6 +1546,7 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1876,6 +1877,8 @@ struct bnx2x {
u32 dump_preset_idx;
bool stats_started;
struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -2232,7 +2235,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -2492,12 +2495,6 @@ enum {
#define NUM_MACS 8
-enum bnx2x_pci_bus_speed {
- BNX2X_PCI_LINK_SPEED_2500 = 2500,
- BNX2X_PCI_LINK_SPEED_5000 = 5000,
- BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 74d6486fccfd..dcafbda3e5be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3256,14 +3256,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
if (prot == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb)) {
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V6;
- } else if (skb_is_gso(skb)) {
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
- if (rc & XMIT_CSUM_ENC)
- rc |= XMIT_GSO_ENC_V4;
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
}
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e8efa1c93ffe..32d0f1435fb4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
+ if (IS_VF(bp))
+ return 0;
+
regdump_len = __bnx2x_get_regs_len(bp);
regdump_len *= 4;
regdump_len += sizeof(struct dump_header);
@@ -2864,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL,
- "Can't perform self-test when interface is down\n");
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
return;
}
@@ -2928,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
- if (bnx2x_test_nvram(bp) != 0) {
- if (!IS_MF(bp))
- buf[4] = 1;
- else
- buf[0] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
if (bnx2x_test_intr(bp) != 0) {
if (!IS_MF(bp))
buf[5] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 32767f6aa33f..cf1df8b62e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 51468227bf3b..20dcc02431ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
}
static int bnx2x_bsc_read(struct link_params *params,
- struct bnx2x_phy *phy,
+ struct bnx2x *bp,
u8 sl_devid,
u16 sl_addr,
u8 lc_addr,
@@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
{
u32 val, i;
int rc = 0;
- struct bnx2x *bp = params->bp;
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
* intended override.
*/
break;
- } else
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ nig_led_mode);
+ }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
0x40);
} else {
+ /* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
+ val);
/* Tell LED3 to blink on source */
bnx2x_cl45_read(bp, phy,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b42f89ce02ef..bb2f20291509 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9916,7 +9916,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
- int rc = false;
+ bool rc = false;
if (down_trylock(&bnx2x_prev_sem))
return false;
@@ -11186,6 +11186,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_mac_hwinfo(bp);
}
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11784,7 +11792,7 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
- return bnx2x_open_epilog(bp);
+ return 0;
}
/* called with rtnl_lock */
@@ -12082,6 +12090,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
return 0;
}
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12111,6 +12133,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12274,10 +12297,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!CHIP_IS_E1x(bp)) {
- dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -12310,34 +12336,11 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return rc;
}
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
- enum bnx2x_pci_bus_speed *speed)
-{
- u32 link_speed, val = 0;
-
- pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
- *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
- link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
- switch (link_speed) {
- case 3:
- *speed = BNX2X_PCI_LINK_SPEED_8000;
- break;
- case 2:
- *speed = BNX2X_PCI_LINK_SPEED_5000;
- break;
- default:
- *speed = BNX2X_PCI_LINK_SPEED_2500;
- }
-}
-
static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
@@ -12694,8 +12697,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- int pcie_width;
- enum bnx2x_pci_bus_speed pcie_speed;
+ enum pcie_link_width pcie_width;
+ enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -12844,18 +12847,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-
- bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
- pcie_width, pcie_speed);
-
- BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+ pcie_speed == PCI_SPEED_UNKNOWN ||
+ pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+ BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+ else
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
- pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
- pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+ pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+ pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+ pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
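The hunk above drops the driver's private PCICFG_LINK_* decoding in favour of the core pcie_get_minimum_link() helper, which reports the slowest and narrowest link segment between the device and the root complex. A hedged sketch of the same call pattern in a generic probe path (bar_report_link() and its pdev argument are placeholders, not bnx2x code):

static void bar_report_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_info(&pdev->dev, "Failed to determine PCI Express bandwidth\n");
		return;
	}

	dev_info(&pdev->dev, "PCI-E x%d %s link\n", width,
		 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : "unknown");
}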
@@ -12874,7 +12878,6 @@ init_one_exit:
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return rc;
}
@@ -12957,7 +12960,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5e07efb6ec13..0216d592d0ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2816,7 +2816,7 @@ struct set_vf_state_cookie {
u8 state;
};
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
{
struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
@@ -3239,8 +3239,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3656,29 +3657,6 @@ alloc_mem_err:
return -ENOMEM;
}
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm fails) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held). We only want to do this if the number of VFs
- * was set before PF driver was loaded.
- */
- if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
-
- return 0;
-}
-
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
int vf_idx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 059f0d460af2..1ff6a9366629 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 28757dfacf0d..9199adf32d33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
mutex_unlock(&bp->vf2pf_mutex);
}
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
int rc = 0, attempts = 0;
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
/* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* output tlvs list */
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
}
}
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -983,53 +1023,59 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
- u64 vf_addr;
- dma_addr_t pf_addr;
u16 length, type;
- int rc;
- struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
/* prepare response */
type = mbx->first_tlv.tl.type;
length = type == CHANNEL_TLV_ACQUIRE ?
sizeof(struct pfvf_acquire_resp_tlv) :
sizeof(struct pfvf_general_resp_tlv);
- bnx2x_add_tlv(bp, resp, 0, type, length);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
- bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
bnx2x_dp_tlv_list(bp, resp);
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
mbx->first_tlv.resp_msg_offset;
pf_addr = mbx->msg_mapping +
offsetof(struct bnx2x_vf_mbx_msg, resp);
- /* copy the response body, if there is one, before the header, as the vf
- * is sensitive to the header being written
+ /* Copy the response buffer. The first u64 is written afterwards, as
+ * the vf is sensitive to the header being written
*/
- if (resp->hdr.tl.length > sizeof(u64)) {
- length = resp->hdr.tl.length - sizeof(u64);
- vf_addr += sizeof(u64);
- pf_addr += sizeof(u64);
- rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
- U64_HI(vf_addr),
- U64_LO(vf_addr),
- length/4);
- if (rc) {
- BNX2X_ERR("Failed to copy response body to VF %d\n",
- vf->abs_vfid);
- goto mbx_error;
- }
- vf_addr -= sizeof(u64);
- pf_addr -= sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
}
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
/* ack the FW */
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@ mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
}
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx, int vfop_status)
{
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
struct pf_vf_resc *resc = &resp->resc;
u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
memset(resp, 0, sizeof(*resp));
@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resc->hw_sbs[i].sb_qid);
DP_CONT(BNX2X_MSG_IOV, "]\n");
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
/* send the response */
vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
/* process the VF message header */
mbx->first_tlv = mbx->msg->req.first_tlv;
+ /* Clear the response buffer so stale data is not mistaken for TLV chains */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
/* dispatch the request (will prepare the response) */
bnx2x_vf_mbx_request(bp, vf, mbx);
goto mbx_done;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 1179fe06d0c7..208568bc7a71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 99394bd49a13..f58a8b80302d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
csk->vlan_id = path_resp->vlan_id;
- memcpy(csk->ha, path_resp->mac_addr, 6);
+ memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
- memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0658b43e148c..ebbfe25acaa6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
atomic_t ref_count;
};
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 12d961c4ebca..00c5be8c55b8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 133
+#define TG3_MIN_NUM 134
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jul 29, 2013"
+#define DRV_MODULE_RELDATE "Sep 16, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
return err;
}
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+ return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+ reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
static int tg3_bmcr_reset(struct tg3 *tp)
{
u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
spin_lock_bh(&tp->lock);
- if (tg3_readphy(tp, reg, &val))
+ if (__tg3_readphy(tp, mii_id, reg, &val))
val = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
spin_lock_bh(&tp->lock);
- if (tg3_writephy(tp, reg, val))
+ if (__tg3_writephy(tp, mii_id, reg, val))
ret = -EIO;
spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
u32 val;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
+ } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+ int addr;
+
+ addr = ssb_gige_get_phyaddr(tp->pdev);
+ if (addr < 0)
+ return addr;
+ tp->phy_addr = addr;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
- tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+ tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return i;
}
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
u32 old_tx_mode = tp->tx_mode;
if (tg3_flag(tp, USE_PHYLIB))
- autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+ autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
else
autoneg = tp->link_config.autoneg;
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
u8 oldflowctrl, linkmesg = 0;
u32 mac_mode, lcl_adv, rmt_adv;
struct tg3 *tp = netdev_priv(dev);
- struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
spin_lock_bh(&tp->lock);
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
/* Bring the PHY back to a known state. */
tg3_bmcr_reset(tp);
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
/* Attach the MAC to the PHY. */
phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
SUPPORTED_Asym_Pause);
break;
default:
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
return -EINVAL;
}
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return;
- phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
- phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
}
}
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
return;
}
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_SCR5_SEL |
- MII_TG3_MISC_SHDW_SCR5_LPED |
+ reg = MII_TG3_MISC_SHDW_SCR5_LPED |
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
- reg = MII_TG3_MISC_SHDW_WREN |
- MII_TG3_MISC_SHDW_APD_SEL |
- MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
if (enable)
reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
- tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
struct phy_device *phydev;
u32 phyid, advertising;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
@@ -6848,12 +6862,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- skb = build_skb(data, frag_size);
- if (!skb) {
- tg3_frag_free(frag_size != 0, data);
- goto drop_it_no_recycle;
- }
- skb_reserve(skb, TG3_RX_OFFSET(tp));
/* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
@@ -6861,6 +6869,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
ri->data = NULL;
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ tg3_frag_free(frag_size != 0, data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
} else {
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
}
- if (err)
- return err;
-
- return 0;
+ return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
name = tp->dev->name;
else {
name = &tnapi->irq_lbl[0];
- snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ if (tnapi->tx_buffers && tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-txrx-%d", tp->dev->name, irq_num);
+ else if (tnapi->tx_buffers)
+ snprintf(name, IFNAMSIZ,
+ "%s-tx-%d", tp->dev->name, irq_num);
+ else if (tnapi->rx_rcb)
+ snprintf(name, IFNAMSIZ,
+ "%s-rx-%d", tp->dev->name, irq_num);
+ else
+ snprintf(name, IFNAMSIZ,
+ "%s-%d", tp->dev->name, irq_num);
name[IFNAMSIZ-1] = 0;
}
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_gset(phydev, cmd);
}
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_ethtool_sset(phydev, cmd);
}
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
- spin_lock_bh(&tp->lock);
if (device_may_wakeup(dp))
tg3_flag_set(tp, WOL_ENABLE);
else
tg3_flag_clear(tp, WOL_ENABLE);
- spin_unlock_bh(&tp->lock);
return 0;
}
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
} else {
u32 bmcr;
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
u32 newadv;
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
return -ENOMEM;
tx_data = skb_put(skb, tx_len);
- memcpy(tx_data, tp->dev->dev_addr, 6);
- memset(tx_data + 6, 0x0, 8);
+ memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+ memset(tx_data + ETH_ALEN, 0x0, 8);
tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
@@ -13683,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
return phy_mii_ioctl(phydev, ifr, cmd);
}
@@ -14921,6 +14941,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
+
+ if (tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+ LED_CTRL_BLINK_RATE_MASK;
+
break;
case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
int len;
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN) {
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
return 0;
}
return -ENODEV;
@@ -16643,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
return 0;
}
#endif
@@ -17052,10 +17081,6 @@ static int tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
-#if 0
- /* Unneeded, already done by tg3_get_invariants. */
- tg3_switch_clocks(tp);
-#endif
if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@ static int tg3_test_dma(struct tg3 *tp)
break;
}
-#if 0
- /* validate data reached card RAM correctly. */
- for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
- u32 val;
- tg3_read_mem(tp, 0x2100 + (i*4), &val);
- if (le32_to_cpu(val) != p[i]) {
- dev_err(&tp->pdev->dev,
- "%s: Buffer corrupted on device! "
- "(%d != %d)\n", __func__, val, i);
- /* ret = -ENODEV here? */
- }
- p[i] = 0;
- }
-#endif
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
if (ret) {
@@ -17362,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_flag_set(tp, FLUSH_POSTED_WRITES);
if (ssb_gige_one_dma_at_once(pdev))
tg3_flag_set(tp, ONE_DMA_AT_ONCE);
- if (ssb_gige_have_roboswitch(pdev))
+ if (ssb_gige_have_roboswitch(pdev)) {
+ tg3_flag_set(tp, USE_PHYLIB);
tg3_flag_set(tp, ROBOSWITCH);
+ }
if (ssb_gige_is_rgmii(pdev))
tg3_flag_set(tp, RGMII_MODE);
}
@@ -17409,9 +17422,12 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
struct phy_device *phydev;
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ phydev = tp->mdio_bus->phy_map[tp->phy_addr];
netdev_info(dev,
"attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@ err_out_free_res:
err_out_disable_pdev:
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -17717,7 +17732,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 70257808aa37..5c3835aa1e1b 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -68,6 +68,9 @@
#define TG3PCI_DEVICE_TIGON3_5762 0x1687
#define TG3PCI_DEVICE_TIGON3_5725 0x1643
#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
+#define TG3PCI_DEVICE_TIGON3_57764 0x1642
+#define TG3PCI_DEVICE_TIGON3_57767 0x1683
+#define TG3PCI_DEVICE_TIGON3_57787 0x1641
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b78e69e0e52a..f276433d37ce 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
- pci_set_drvdata(pdev, NULL);
return -ENOMEM;
}
pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index aefee77523f2..f7e033f8a00e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -372,38 +372,37 @@ extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
*/
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
- struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
/* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
/* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
- struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
/* Debugfs */
-void bnad_debugfs_init(struct bnad *bnad);
-void bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
/* MACROS */
/* To set & get the stats counters */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 48f52882a22b..4fc5c8ef5121 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1060,13 +1060,13 @@ static int xgmac_stop(struct net_device *dev)
{
struct xgmac_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
-
if (readl(priv->base + XGMAC_DMA_INTR_ENA))
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_disable(dev);
+
/* Disable the MAC core */
xgmac_mac_disable(priv->base);
@@ -1370,11 +1370,8 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
}
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
/* return early if the buffer sizes will not change */
- if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
- return 0;
if (old_mtu == new_mtu)
return 0;
@@ -1382,8 +1379,9 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev))
return 0;
- /* Bring the interface down and then back up */
+ /* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
+ dev->mtu = new_mtu;
return xgmac_open(dev);
}
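The xgmac_change_mtu rework above now stops the interface first and only then commits the new MTU, so the buffer sizes seen on reopen are consistent with dev->mtu. A rough sketch of an ndo_change_mtu handler following the same stop/assign/reopen order (foo_change_mtu, foo_stop and foo_open are placeholder names for a driver's own callbacks):

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	if (dev->mtu == new_mtu)
		return 0;

	if (!netif_running(dev)) {
		/* nothing to resize yet; the new value takes effect on open */
		dev->mtu = new_mtu;
		return 0;
	}

	/* bring the interface down, change the MTU, bring it back up */
	foo_stop(dev);
	dev->mtu = new_mtu;
	return foo_open(dev);
}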
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 5ccbed1784d2..8abb46b39032 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
return board_info(adap)->clock_core / 1000000;
}
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
-
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
-
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d7048db9863d..1d021059f097 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1168,7 +1168,6 @@ out_free_dev:
pci_release_regions(pdev);
out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
t1_sw_reset(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 40c7b93ababc..eb33a31b08a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
- memcpy(mac_addr, cmac->instance->mac_addr, 6);
+ memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
return 0;
}
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
*/
/* Store local copy */
- memcpy(cmac->instance->mac_addr, ma, 6);
+ memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index b650951791dd..45d77334d7d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3374,7 +3374,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out:
return err;
}
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 6990f6c65221..81029b872bdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -685,10 +685,6 @@
#define V_BUSY(x) ((x) << S_BUSY)
#define F_BUSY V_BUSY(1U)
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define A_MC7_EXT_MODE1 0x108
#define A_MC7_EXT_MODE2 0x10c
@@ -749,14 +745,6 @@
#define A_MC7_CAL 0x128
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
-#define S_BUSY 31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY V_BUSY(1U)
-
#define S_CAL_FAULT 30
#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
#define F_CAL_FAULT V_CAL_FAULT(1U)
@@ -815,9 +803,6 @@
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
-#define F_OP V_OP(1U)
-#define A_SF_OP 0x6dc
-
#define A_MC7_BIST_ADDR_BEG 0x168
#define A_MC7_BIST_ADDR_END 0x16c
@@ -830,8 +815,6 @@
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)
-#define F_CONT V_CONT(1U)
-
#define A_MC7_INT_ENABLE 0x178
#define S_AE 17
@@ -1017,8 +1000,6 @@
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
-#define F_NICMODE V_NICMODE(1U)
-
#define S_IPV6ENABLE 15
#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
#define F_IPV6ENABLE V_IPV6ENABLE(1U)
@@ -1562,27 +1543,15 @@
#define A_ULPRX_STAG_ULIMIT 0x530
#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
#define A_ULPRX_PBL_LLIMIT 0x53c
#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
#define A_ULPRX_TDDP_TAGMASK 0x524
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
#define A_ULPTX_CONFIG 0x580
#define S_CFG_CQE_SOP_MASK 1
@@ -2053,8 +2022,6 @@
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)
-#define F_TMMODE V_TMMODE(1U)
-
#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
#define A_MC5_DB_FILTER_TABLE 0x710
@@ -2454,8 +2421,6 @@
#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
#define F_TXACTENABLE V_TXACTENABLE(1U)
-#define A_XGM_SERDES_CTRL0 0x8e0
-
#define S_RESET3 23
#define V_RESET3(x) ((x) << S_RESET3)
#define F_RESET3 V_RESET3(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index dfd1e36f5753..ecd2fb3ef695 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,7 +48,6 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
-#include "t4_hw.h"
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c73cabdbd4c0..8b929eeecd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
struct net_device *event_dev;
int ret = NOTIFY_DONE;
struct bonding *bond = netdev_priv(ifa->idev->dev);
+ struct list_head *iter;
struct slave *slave;
struct pci_dev *first_pdev = NULL;
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
* in all of them only once.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave) {
+ bond_for_each_slave(bond, slave, iter) {
if (!first_pdev) {
ret = clip_add(slave->dev, ifa, event);
/* If clip_add is success then only initialize
@@ -6074,7 +6075,6 @@ sriov:
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
pci_release_regions(pdev);
}
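Note: the cxgb4_main.c change follows the bonding rework in this series: bond_for_each_slave() now takes a caller-supplied struct list_head cursor instead of hiding the iterator inside the macro. Assuming that signature, the calling pattern is (hypothetical loop body, illustrative only):

struct list_head *iter;
struct slave *slave;

bond_for_each_slave(bond, slave, iter) {
	/* visits each slave once; iter is the list cursor used by the macro */
	handle_slave(slave->dev);	/* hypothetical helper */
}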
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 40c22e7de15c..5f90ec5f7519 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2782,11 +2782,9 @@ err_unmap_bar:
err_free_adapter:
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
err_release_regions:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
pci_clear_master(pdev);
err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
}
iounmap(adapter->regs);
kfree(adapter);
- pci_set_drvdata(pdev, NULL);
}
/*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0xb000, 0), /* PE10K FPGA */
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
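Note: DEFINE_PCI_DEVICE_TABLE() expands to a const struct pci_device_id array, so the change above is mostly about adding const to the ID table. A minimal sketch, assuming the macro definition of this kernel generation (device ID taken from the table above):

/* roughly: #define DEFINE_PCI_DEVICE_TABLE(_table) const struct pci_device_id _table[] */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, 0x4800) },	/* T440-dbg */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);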
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index df296af20bd5..8475c4cda9e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
* Builds an sk_buff from the given packet gather list. Returns the
* sk_buff or %NULL if sk_buff allocation failed.
*/
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
{
struct sk_buff *skb;
@@ -1443,7 +1444,7 @@ out:
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
int frag;
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
* on this queue. If the system is under memory shortage use a fairly
* long delay to help recovery.
*/
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* The MSI interrupt handler handles data events from SGE response queues as
* well as error and other async events as they all use the same MSI vector.
*/
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
{
struct adapter *adapter = cookie;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 7b756cf9474a..ff78dfaec508 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2309,7 +2309,6 @@ err_out_release_regions:
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
enic_iounmap(enic);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a7a941b1a655..7080ad6c4014 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1623,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+ memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index eaab73cf27ca..38148b0e3a95 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
iounmap(de->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 263b92c00cbf..c05b66dfcc30 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
pci_disable_device (pdev);
}
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83139307861c..5ad9e3e3c0b8 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -523,7 +523,6 @@ err_out_res:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
-
- pci_set_drvdata(pdev, NULL);
}
DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 4e8cfa2ac803..add05f14b38b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
- pci_set_drvdata (pdev, NULL);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 93845afe1cea..a5397b130724 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -429,7 +429,6 @@ err_out_release:
err_out_disable:
pci_disable_device(pdev);
err_out_free:
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index c7b04ecf5b49..62fe512bb216 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 9b84cb04fe5f..ab7ebac6fbea 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -289,7 +289,6 @@ out:
err_unmap:
pci_iounmap(pdev, private->ioaddr);
reg_fail:
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
tx_buf_fail:
dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, card->ioaddr);
- pci_set_drvdata(pdev, NULL);
dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
free_netdev(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index afa8e3af2c4d..4fb756d219f7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
pci_release_regions (pdev);
pci_disable_device (pdev);
}
- pci_set_drvdata (pdev, NULL);
}
static struct pci_driver rio_driver = {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index bf3bf6f22c99..113cd799a131 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -703,7 +703,6 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_res:
pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
pci_iounmap(pdev, np->base);
pci_release_regions(pdev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c99dac6a9ddf..f4825db5d179 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.9.134.0u"
+#define DRV_VER "4.9.224.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_UMC_NUM_VLANS_SUPPORTED 15
-#define BE_MAX_EQD 96u
+#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024
@@ -199,8 +199,37 @@ struct be_eq_obj {
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define BE_EQ_IDLE 0
+#define BE_EQ_NAPI 1 /* napi owns this EQ */
+#define BE_EQ_POLL 2 /* poll owns this EQ */
+#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
+#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
+#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
+#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
+#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
+ unsigned int state;
+ spinlock_t lock; /* lock to serialize napi and busy-poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ bool enable;
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffies;
+ u64 rx_pkts_prev; /* Used to calculate RX pps */
+ u64 tx_reqs_prev; /* Used to calculate TX pps */
+};
+
+enum {
+ NAPI_POLLING,
+ BUSY_POLLING
+};
+
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -215,6 +244,7 @@ struct be_tx_stats {
u64 tx_compl;
ulong tx_jiffies;
u32 tx_stops;
+ u32 tx_drv_drops; /* pkts dropped by driver */
struct u64_stats_sync sync;
struct u64_stats_sync sync_compl;
};
@@ -239,15 +269,12 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
- u64 rx_pkts_prev;
- ulong rx_jiffies;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
u32 rx_compl;
u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */
- u32 rx_pps; /* pkts per second */
struct u64_stats_sync sync;
};
@@ -316,6 +343,11 @@ struct be_drv_stats {
u32 rx_input_fifo_overflow_drop;
u32 pmem_fifo_overflow_drop;
u32 jabber_events;
+ u32 rx_roce_bytes_lsd;
+ u32 rx_roce_bytes_msd;
+ u32 rx_roce_frames;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
};
struct be_vf_cfg {
@@ -405,6 +437,7 @@ struct be_adapter {
u32 big_page_size; /* Compounded page size shared by rx wrbs */
struct be_drv_stats drv_stats;
+ struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
@@ -437,7 +470,6 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
- u32 if_type;
struct {
u32 size;
u32 total_size;
@@ -472,8 +504,8 @@ struct be_adapter {
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
- be_physfn(adapter))
+#define sriov_want(adapter) (be_physfn(adapter) && \
+ (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
@@ -546,6 +578,10 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
i++, eqo++)
+#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \
+ for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
+ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
@@ -696,6 +732,115 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock(&eqo->lock); /* BH is already disabled */
+ if (eqo->state & BE_EQ_LOCKED) {
+ WARN_ON(eqo->state & BE_EQ_NAPI);
+ eqo->state |= BE_EQ_NAPI_YIELD;
+ status = false;
+ } else {
+ eqo->state = BE_EQ_NAPI;
+ }
+ spin_unlock(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+ spin_lock(&eqo->lock); /* BH is already disabled */
+
+ WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ bool status = true;
+
+ spin_lock_bh(&eqo->lock);
+ if (eqo->state & BE_EQ_LOCKED) {
+ eqo->state |= BE_EQ_POLL_YIELD;
+ status = false;
+ } else {
+ eqo->state |= BE_EQ_POLL;
+ }
+ spin_unlock_bh(&eqo->lock);
+ return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_bh(&eqo->lock);
+
+ WARN_ON(eqo->state & (BE_EQ_NAPI));
+ eqo->state = BE_EQ_IDLE;
+
+ spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+ spin_lock_init(&eqo->lock);
+ eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+ local_bh_disable();
+
+ /* It's enough to just acquire the napi lock on the eqo to stop
+ * be_busy_poll() from processing any queues.
+ */
+ while (!be_lock_napi(eqo))
+ mdelay(1);
+
+ local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+ return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+ return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
+
static inline int fw_major_num(const char *fw_ver)
{
int fw_major = 0;
@@ -705,27 +850,19 @@ static inline int fw_major_num(const char *fw_ver)
return fw_major;
}
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
- u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
*/
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
#endif /* BE_H */
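Note: the new CONFIG_NET_RX_BUSY_POLL helpers above form a small state machine over eqo->state: napi and socket busy-poll each try to claim an event queue, and the loser records a yield flag instead of spinning. Their intended pairing, mirroring the be_main.c hunks later in this patch, is roughly (sketch only):

/* napi context, BH already disabled */
if (be_lock_napi(eqo)) {
	/* ... process RX completions for this EQ ... */
	be_unlock_napi(eqo);
}	/* else: busy-poll owns the EQ; napi retries on the next poll */

/* socket busy-poll context */
if (!be_lock_busy_poll(eqo))
	return LL_FLUSH_BUSY;
/* ... poll a handful of RX completions ... */
be_unlock_busy_poll(eqo);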
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c08fd32bb8e5..7fb0edfe3d24 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -522,7 +522,7 @@ static u16 be_POST_stage_get(struct be_adapter *adapter)
return sem & POST_STAGE_MASK;
}
-int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
@@ -1436,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
/* version 1 of the cmd is not supported only by BE2 */
- if (!BE2_chip(adapter))
+ if (BE2_chip(adapter))
+ hdr->version = 0;
+ if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
+ else
+ hdr->version = 2;
be_mcc_notify(adapter);
adapter->stats_cmd_sent = true;
@@ -1719,11 +1723,12 @@ err:
/* set the EQ delay interval of an EQ to specified value
* Uses async mcc
*/
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+ int num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
- int status = 0;
+ int status = 0, i;
spin_lock_bh(&adapter->mcc_lock);
@@ -1737,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
- req->num_eq = cpu_to_le32(1);
- req->delay[0].eq_id = cpu_to_le32(eq_id);
- req->delay[0].phase = 0;
- req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->set_eqd[i].phase = 0;
+ req->set_eqd[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
be_mcc_notify(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -3520,7 +3527,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
struct be_cmd_enable_disable_vf *req;
int status;
- if (!lancer_chip(adapter))
+ if (BEx_chip(adapter))
return 0;
spin_lock_bh(&adapter->mcc_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 108ca8abf0af..edf3e8a0ff83 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
} __packed;
/******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+};
+
struct be_cmd_req_modify_eq_delay {
struct be_cmd_req_hdr hdr;
u32 num_eq;
- struct {
- u32 eq_id;
- u32 phase;
- u32 delay_multiplier;
- } delay[8];
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
} __packed;
struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 {
u32 rsvd[4];
};
+struct be_port_rxf_stats_v2 {
+ u32 rsvd0[10];
+ u32 roce_bytes_received_lsd;
+ u32 roce_bytes_received_msd;
+ u32 rsvd1[5];
+ u32 roce_frames_received;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd2[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd3[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd4[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd5[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd6[3];
+ u32 rx_drops_payload_size;
+ u32 rx_drops_clipped_header;
+ u32 rx_drops_crc;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
+ u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+ struct be_port_rxf_stats_v2 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[35];
+};
+
struct be_hw_stats_v1 {
struct be_rxf_stats_v1 rxf;
u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
+struct be_erx_stats_v2 {
+ u32 rx_drops_no_fragments[136]; /* dwords 0 to 135 */
+ u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+ struct be_rxf_stats_v2 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v2 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v2 hw_stats;
+};
+
/************** get fat capabilites *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
@@ -1865,137 +1951,119 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
- u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id,
- u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
- struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
- u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
- u16 *vtag_array, u32 num, bool untagged,
- bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
- u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
- u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+ u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+ u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay,
+ int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+ u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, bool untagged, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
- u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
- u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
- struct be_dma_mem *cmd, u32 flash_oper,
- u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *change_status,
- u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+ u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status);
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size,
- u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
- u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
- u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
- u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id,
- u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
- u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
- u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
- u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+ int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+ struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+ u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+ u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+ u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ struct be_fat_conf_params *cfgs);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
- u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
- struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b440a1fac77b..08330034d9ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(rx_drops_mtu)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)}
+ {DRVSTAT_INFO(be_on_die_temperature)},
+ {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+ {DRVSTAT_INFO(rx_roce_bytes_msd)},
+ {DRVSTAT_INFO(rx_roce_frames)},
+ {DRVSTAT_INFO(roce_drops_payload_len)},
+ {DRVSTAT_INFO(roce_drops_crc)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
/* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ.
*/
- {DRVSTAT_TX_INFO(tx_stops)}
+ {DRVSTAT_TX_INFO(tx_stops)},
+ /* Pkts dropped in the driver's transmit path */
+ {DRVSTAT_TX_INFO(tx_drv_drops)}
};
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo = &adapter->eq_obj[0];
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
- et->rx_coalesce_usecs = eqo->cur_eqd;
- et->rx_coalesce_usecs_high = eqo->max_eqd;
- et->rx_coalesce_usecs_low = eqo->min_eqd;
+ et->rx_coalesce_usecs = aic->prev_eqd;
+ et->rx_coalesce_usecs_high = aic->max_eqd;
+ et->rx_coalesce_usecs_low = aic->min_eqd;
- et->tx_coalesce_usecs = eqo->cur_eqd;
- et->tx_coalesce_usecs_high = eqo->max_eqd;
- et->tx_coalesce_usecs_low = eqo->min_eqd;
+ et->tx_coalesce_usecs = aic->prev_eqd;
+ et->tx_coalesce_usecs_high = aic->max_eqd;
+ et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = eqo->enable_aic;
- et->use_adaptive_tx_coalesce = eqo->enable_aic;
+ et->use_adaptive_rx_coalesce = aic->enable;
+ et->use_adaptive_tx_coalesce = aic->enable;
return 0;
}
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
struct be_eq_obj *eqo;
int i;
for_all_evt_queues(adapter, eqo, i) {
- eqo->enable_aic = et->use_adaptive_rx_coalesce;
- eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
- eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
- eqo->eqd = et->rx_coalesce_usecs;
+ aic->enable = et->use_adaptive_rx_coalesce;
+ aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+ aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+ aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+ aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+ aic++;
}
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 53ed58b492c8..741d3bff5ae7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -22,6 +22,7 @@
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
+#include <net/busy_poll.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -306,10 +307,14 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
}
}
@@ -320,10 +325,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
- } else {
+ } else if (BE3_chip(adapter)) {
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
}
}
@@ -422,6 +431,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v2 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+ if (be_roce_supported(adapter)) {
+ drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+ drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+ drvs->rx_roce_frames = port_stats->roce_frames_received;
+ drvs->roce_drops_crc = port_stats->roce_drops_crc;
+ drvs->roce_drops_payload_len =
+ port_stats->roce_drops_payload_len;
+ }
+}
+
static void populate_lancer_stats(struct be_adapter *adapter)
{
@@ -489,7 +552,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
void be_parse_stats(struct be_adapter *adapter)
{
- struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
struct be_rx_obj *rxo;
int i;
u32 erx_stat;
@@ -499,11 +562,13 @@ void be_parse_stats(struct be_adapter *adapter)
} else {
if (BE2_chip(adapter))
populate_be_v0_stats(adapter);
- else
- /* for BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
+ /* for BE3 */
populate_be_v1_stats(adapter);
+ else
+ populate_be_v2_stats(adapter);
- /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+ /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
for_all_rx_queues(adapter, rxo, i) {
erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +1000,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 start = txq->head;
skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
- if (!skb)
+ if (!skb) {
+ tx_stats(txo)->tx_drv_drops++;
return NETDEV_TX_OK;
+ }
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -965,6 +1032,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
+ tx_stats(txo)->tx_drv_drops++;
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -1275,53 +1343,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+ ulong now)
{
- struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
- ulong now = jiffies;
- ulong delta = now - stats->rx_jiffies;
- u64 pkts;
- unsigned int start, eqd;
+ aic->rx_pkts_prev = rx_pkts;
+ aic->tx_reqs_prev = tx_pkts;
+ aic->jiffies = now;
+}
- if (!eqo->enable_aic) {
- eqd = eqo->eqd;
- goto modify_eqd;
- }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ int eqd, i, num = 0, start;
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 rx_pkts, tx_pkts;
+ ulong now;
+ u32 pps, delta;
- if (eqo->idx >= adapter->num_rx_qs)
- return;
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ goto modify_eqd;
+ }
- stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+ rxo = &adapter->rx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+ rx_pkts = rxo->stats.rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
- /* Wrapped around */
- if (time_before(now, stats->rx_jiffies)) {
- stats->rx_jiffies = now;
- return;
- }
+ txo = &adapter->tx_obj[eqo->idx];
+ do {
+ start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+ tx_pkts = txo->stats.tx_reqs;
+ } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
- /* Update once a second */
- if (delta < HZ)
- return;
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- pkts = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
- stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
- stats->rx_pkts_prev = pkts;
- stats->rx_jiffies = now;
- eqd = (stats->rx_pps / 110000) << 3;
- eqd = min(eqd, eqo->max_eqd);
- eqd = max(eqd, eqo->min_eqd);
- if (eqd < 10)
- eqd = 0;
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ continue;
+ }
+
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
- if (eqd != eqo->cur_eqd) {
- be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
- eqo->cur_eqd = eqd;
+ if (eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
}
+
+ if (num)
+ be_cmd_modify_eqd(adapter, set_eqd, num);
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
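Note: a worked example of the adaptive branch in the rewritten be_eqd_update() above (numbers are illustrative): if an EQ saw 250,000 RX packets and 50,000 TX requests over delta = 1,000 ms, then pps = 250000 + 50000 = 300000 and eqd = (300000 / 15000) << 2 = 80 usecs. That value is clamped to [min_eqd, max_eqd] (BE_MAX_EQD is now 128), and because it differs from prev_eqd it is queued with delay_multiplier = 80 * 65 / 100 = 52; all changed EQs are then pushed to firmware in one batched be_cmd_modify_eqd() call.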
@@ -1463,7 +1557,7 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_rx_obj *rxo,
+static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
@@ -1488,7 +1582,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
-
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1546,6 +1640,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
+ skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@ -1726,6 +1821,8 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if (posted) {
atomic_add(posted, &rxq->used);
+ if (rxo->rx_post_starved)
+ rxo->rx_post_starved = false;
be_rxq_notify(adapter, rxq->id, posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
@@ -1928,6 +2025,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
@@ -1938,6 +2036,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq;
struct be_eq_obj *eqo;
+ struct be_aic_obj *aic;
int i, rc;
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2045,13 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
+ aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
- eqo->max_eqd = BE_MAX_EQD;
- eqo->enable_aic = true;
+ aic->max_eqd = BE_MAX_EQD;
+ aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2167,7 +2268,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2198,10 +2299,12 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
goto loop_continue;
}
- if (do_gro(rxcp))
+ /* Don't do gro when we're busy_polling */
+ if (do_gro(rxcp) && polling != BUSY_POLLING)
be_rx_compl_process_gro(rxo, napi, rxcp);
else
- be_rx_compl_process(rxo, rxcp);
+ be_rx_compl_process(rxo, napi, rxcp);
+
loop_continue:
be_rx_stats_update(rxo, rxcp);
}
@@ -2209,7 +2312,11 @@ loop_continue:
if (work_done) {
be_cq_notify(adapter, rx_cq->id, true, work_done);
- if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ /* When an rx-obj gets into post_starved state, just
+ * let be_worker do the posting.
+ */
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
+ !rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_ATOMIC);
}
@@ -2254,6 +2361,7 @@ int be_poll(struct napi_struct *napi, int budget)
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
int max_work = 0, work, i, num_evts;
+ struct be_rx_obj *rxo;
bool tx_done;
num_evts = events_get(eqo);
@@ -2266,13 +2374,18 @@ int be_poll(struct napi_struct *napi, int budget)
max_work = budget;
}
- /* This loop will iterate twice for EQ0 in which
- * completions of the last RXQ (default one) are also processed
- * For other EQs the loop iterates only once
- */
- for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
- work = be_process_rx(&adapter->rx_obj[i], napi, budget);
- max_work = max(work, max_work);
+ if (be_lock_napi(eqo)) {
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed
+ * For other EQs the loop iterates only once
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
+ max_work = max(work, max_work);
+ }
+ be_unlock_napi(eqo);
+ } else {
+ max_work = budget;
}
if (is_mcc_eqo(eqo))
@@ -2288,6 +2401,28 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int be_busy_poll(struct napi_struct *napi)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_rx_obj *rxo;
+ int i, work = 0;
+
+ if (!be_lock_busy_poll(eqo))
+ return LL_FLUSH_BUSY;
+
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
+ if (work)
+ break;
+ }
+
+ be_unlock_busy_poll(eqo);
+ return work;
+}
+#endif
+
void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@ -2519,9 +2654,11 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
- for_all_evt_queues(adapter, eqo, i)
+ for_all_evt_queues(adapter, eqo, i) {
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
napi_disable(&eqo->napi);
+ be_disable_busy_poll(eqo);
+ }
adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
}
@@ -2632,6 +2769,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
+ be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -2937,7 +3075,8 @@ static int be_vf_setup(struct be_adapter *adapter)
goto err;
vf_cfg->def_vid = def_vlan;
- be_cmd_enable_vf(adapter, vf + 1);
+ if (!old_vfs)
+ be_cmd_enable_vf(adapter, vf + 1);
}
if (!old_vfs) {
@@ -2962,12 +3101,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
- if (BE3_chip(adapter) && be_physfn(adapter)) {
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
int max_vfs;
max_vfs = pci_sriov_get_totalvfs(pdev);
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
- use_sriov = res->max_vfs && num_vfs;
+ use_sriov = res->max_vfs;
}
if (be_physfn(adapter))
@@ -2983,8 +3122,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
res->max_mcast_mac = BE_MAX_MC;
+ /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
- !be_physfn(adapter))
+ !be_physfn(adapter) || (adapter->port_num > 1))
res->max_tx_qs = 1;
else
res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3166,6 @@ static int be_get_resources(struct be_adapter *adapter)
adapter->res = res;
}
- /* For BE3 only check if FW suggests a different max-txqs value */
- if (BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res, 0);
- if (!status && res.max_tx_qs)
- adapter->res.max_tx_qs =
- min(adapter->res.max_tx_qs, res.max_tx_qs);
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3264,7 +3396,7 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter) && num_vfs) {
+ if (sriov_want(adapter)) {
if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
@@ -3906,6 +4038,9 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = be_busy_poll
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3966,11 +4101,6 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- u32 sli_intf;
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
@@ -4083,9 +4213,11 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
else if (BE2_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- else
- /* BE3 and Skyhawk */
+ else if (BE3_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ else
+ /* ALL non-BE ASICs */
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL);
@@ -4119,7 +4251,6 @@ static void be_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4268,7 +4399,6 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
- struct be_eq_obj *eqo;
int i;
/* when interrupts are not yet enabled, just reap any pending
@@ -4293,14 +4423,14 @@ static void be_worker(struct work_struct *work)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
+ /* Replenish RX-queues starved due to memory
+ * allocation failures.
+ */
+ if (rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_KERNEL);
- }
}
- for_all_evt_queues(adapter, eqo, i)
- be_eqd_update(adapter, eqo);
+ be_eqd_update(adapter);
reschedule:
adapter->work_counter++;
@@ -4376,9 +4506,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
}
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (status)
- dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+ if (be_physfn(adapter)) {
+ status = pci_enable_pcie_error_reporting(pdev);
+ if (!status)
+ dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+ }
status = be_ctrl_init(adapter);
if (status)
@@ -4449,7 +4581,6 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
free_netdev(netdev);
- pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);
disable_dev:
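The be2net hunks above wire busy polling (ndo_busy_poll) into the driver: be_poll() and the new be_busy_poll() both walk the RX queues hanging off an event queue, arbitrating via the be_lock_napi()/be_lock_busy_poll() helpers, and GRO is skipped while busy-polling. A minimal sketch of that arbitration pattern follows; the state names (bp_state, BP_*) and the C11-atomic implementation are illustrative assumptions, not the driver's actual helpers, which also track yield states.

/* Hedged sketch: a per-EQ state word deciding whether NAPI or a
 * busy-polling socket owns the RX queues right now. */
#include <stdatomic.h>
#include <stdbool.h>

enum { BP_IDLE, BP_IN_NAPI, BP_IN_BUSY_POLL };

struct eq_ctx {
	_Atomic int bp_state;
};

static bool bp_lock_napi(struct eq_ctx *eq)
{
	int expected = BP_IDLE;

	/* NAPI wins only if nobody is busy-polling this EQ. */
	return atomic_compare_exchange_strong(&eq->bp_state, &expected,
					      BP_IN_NAPI);
}

static void bp_unlock_napi(struct eq_ctx *eq)
{
	atomic_store(&eq->bp_state, BP_IDLE);
}

static bool bp_lock_busy_poll(struct eq_ctx *eq)
{
	int expected = BP_IDLE;

	/* Busy-poll backs off (the caller returns LL_FLUSH_BUSY)
	 * whenever NAPI already owns the queues. */
	return atomic_compare_exchange_strong(&eq->bp_state, &expected,
					      BP_IN_BUSY_POLL);
}

static void bp_unlock_busy_poll(struct eq_ctx *eq)
{
	atomic_store(&eq->bp_state, BP_IDLE);
}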
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c706b7a9397e..4b22a9579f85 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, np->mem);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
} else
printk(KERN_ERR "fealnx: remove for unknown device\n");
}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6b60582ce8cf..56f2f608a9f4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, 6);
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ef95c3ae53e6..b14d7904a075 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2920,7 +2920,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar_priv_rx_q *rx_queue = NULL;
int work_done = 0, work_done_per_q = 0;
int i, budget_per_q = 0;
- int has_tx_work;
+ int has_tx_work = 0;
unsigned long rstat_rxf;
int num_act_queues;
@@ -2935,62 +2935,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
if (num_act_queues)
budget_per_q = budget/num_act_queues;
- while (1) {
- has_tx_work = 0;
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
}
+ }
- for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- /* skip queue if not active */
- if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
- continue;
-
- rx_queue = priv->rx_queue[i];
- work_done_per_q =
- gfar_clean_rx_ring(rx_queue, budget_per_q);
- work_done += work_done_per_q;
-
- /* finished processing this queue */
- if (work_done_per_q < budget_per_q) {
- /* clear active queue hw indication */
- gfar_write(&regs->rstat,
- RSTAT_CLEAR_RXF0 >> i);
- rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
- num_act_queues--;
-
- if (!num_act_queues)
- break;
- /* recompute budget per Rx queue */
- budget_per_q =
- (budget - work_done) / num_act_queues;
- }
- }
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+ continue;
- if (work_done >= budget)
- break;
+ rx_queue = priv->rx_queue[i];
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ }
+ }
- if (!num_act_queues && !has_tx_work) {
+ if (!num_act_queues && !has_tx_work) {
- napi_complete(napi);
+ napi_complete(napi);
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
- break;
- }
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return work_done;
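The gfar_poll() change above flattens the old while(1) retry loop into a single pass: Tx rings are cleaned once, the budget is split evenly across the RX queues flagged active in RSTAT, and a queue that finishes under its share drops out of the active count. A self-contained sketch of that budget-per-queue pass is below; the queue bookkeeping is simplified and the names are hypothetical, not gianfar's.

/* Hedged sketch: single-pass NAPI poll with an even per-queue budget. */
#include <stdio.h>

#define NUM_RXQ 4

static int pending[NUM_RXQ] = { 10, 0, 25, 3 };	/* fake pending work */

static int clean_rx_ring(int q, int budget)
{
	int done = pending[q] < budget ? pending[q] : budget;

	pending[q] -= done;
	return done;
}

int main(void)
{
	int budget = 32, work_done = 0, num_act = 0, q;

	for (q = 0; q < NUM_RXQ; q++)
		if (pending[q])
			num_act++;

	if (num_act) {
		int budget_per_q = budget / num_act;

		for (q = 0; q < NUM_RXQ; q++) {
			int done;

			if (!pending[q])
				continue;	/* skip inactive queues */

			done = clean_rx_ring(q, budget_per_q);
			work_done += done;

			/* A queue that used less than its share is finished;
			 * gfar_poll clears its RSTAT bit at this point. */
			if (done < budget_per_q && !--num_act)
				break;
		}
	}

	printf("processed %d of %d budget\n", work_done, budget);
	return 0;
}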
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04112b98ff5d..114c58f9d8d2 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
- int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 64b329fecf3d..5548b6d00c31 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3901,7 +3901,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 6231bc02b964..1085257385d2 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA
+ depends on PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 91227d03274e..37860096f744 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
HP100_BUS_EISA ? IRQF_SHARED : 0,
- "hp100", dev)) {
+ dev->name, dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
}
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index e38816145395..7ce6379fd1a3 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
lp->sa_cmd.cmd.command = CmdSASetup;
i596_add_cmd(dev, &lp->sa_cmd.cmd);
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
@@ -1527,9 +1527,7 @@ int __init init_module(void)
if (debug >= 0)
i596_debug = debug;
dev_82596 = i82596_probe(-1);
- if (IS_ERR(dev_82596))
- return PTR_ERR(dev_82596);
- return 0;
+ return PTR_ERR_OR_ZERO(dev_82596);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index d653bac4cfc4..861fa15e1e81 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
i596_add_cmd(dev, &dma->cf_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
- memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+ memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, ha->addr, 6);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: Adding address %pM\n",
dev->name, cp));
- cp += 6;
+ cp += ETH_ALEN;
}
DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index cdf2321f1f0f..ae342fdb42c8 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2678,7 +2678,7 @@ static int emac_init_config(struct emac_instance *dev)
np->full_name);
return -ENXIO;
}
- memcpy(dev->ndev->dev_addr, p, 6);
+ memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 59a92d5870b5..9c45efe4c8fe 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -29,13 +29,13 @@
struct emac_instance;
struct mal_instance;
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
# define DBG_LEVEL 1
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
index 668bceeff4a2..d4f1374d1900 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.h
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -56,15 +56,15 @@ struct rgmii_instance {
#ifdef CONFIG_IBM_EMAC_RGMII
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
index 350b7096a041..4d5f336f07b3 100644
--- a/drivers/net/ethernet/ibm/emac/tah.h
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -72,13 +72,13 @@ struct tah_instance {
#ifdef CONFIG_IBM_EMAC_TAH
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 455bfb085493..0959c55b1459 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -53,15 +53,15 @@ struct zmii_instance {
#ifdef CONFIG_IBM_EMAC_ZMII
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee69d16..952d795230a4 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, 6);
+ memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index bdf5023724e7..25045ae07171 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops ipg_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279f..cbaba4442d4b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2985,7 +2985,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
err_out_free_dev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd59ec75..58c147271a36 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -325,7 +325,7 @@ enum e1000_state_t {
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5aa..ad6800ad1bfc 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3917,8 +3917,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- (unsigned long)((tx_ring - adapter->tx_ring) /
- sizeof(struct e1000_tx_ring)),
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(hw->hw_addr + tx_ring->tdh),
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd11015d..0150f7fc893d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -472,26 +472,25 @@ enum latency_range {
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c7..1ca9834cdfda 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -46,7 +46,6 @@
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
-#include <linux/version.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
@@ -347,9 +346,9 @@ struct i40e_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +365,7 @@ struct i40e_vsi {
u8 dtype;
/* List of q_vectors allocated to this VSI */
- struct i40e_q_vector *q_vectors;
+ struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
@@ -422,8 +421,9 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
- char name[IFNAMSIZ + 9];
cpumask_t affinity_mask;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -544,6 +544,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
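The i40e.h hunks above convert the VSI's rx_rings, tx_rings and q_vectors from embedded arrays into arrays of pointers filled at run time, which is why later hunks can read an Rx ring as &tx_ring[1] ("Rx ring is the 2nd half of the queue pair"). A hedged sketch of that layout is below; struct and function names are illustrative, and the real allocator in the driver may differ in details such as error unwinding.

/* Hedged sketch: one allocation per queue pair, Tx at [0], Rx at [1],
 * referenced through per-VSI pointer arrays. */
#include <stdlib.h>

struct ring {
	int count;
	/* descriptor memory, stats, syncp, ... */
};

struct vsi {
	int num_queue_pairs;
	struct ring **tx_rings;		/* pointer arrays, filled at runtime */
	struct ring **rx_rings;
};

static int vsi_alloc_rings(struct vsi *vsi)
{
	int i;

	vsi->tx_rings = calloc(vsi->num_queue_pairs, sizeof(*vsi->tx_rings));
	vsi->rx_rings = calloc(vsi->num_queue_pairs, sizeof(*vsi->rx_rings));
	if (!vsi->tx_rings || !vsi->rx_rings)
		return -1;		/* error unwinding omitted */

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		/* back-to-back pair, so rx == &tx[1] */
		struct ring *pair = calloc(2, sizeof(*pair));

		if (!pair)
			return -1;
		vsi->tx_rings[i] = &pair[0];
		vsi->rx_rings[i] = &pair[1];
	}
	return 0;
}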
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b74..ef4cb1cf31f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
- char dump_request_buf[16];
bool seid_found = false;
- int bytes_not_copied;
long seid = -1;
int buflen = 0;
int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(dump_request_buf))
- return -ENOSPC;
-
- bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- if (bytes_not_copied > 0)
- count -= bytes_not_copied;
- dump_request_buf[count] = '\0';
/* decode the SEID given to be dumped */
- ret = kstrtol(dump_request_buf, 0, &seid);
- if (ret < 0) {
- dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
- dump_request_buf);
+ ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "bad seid value\n");
} else if (seid == 0) {
seid_found = true;
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
memcpy(p, vsi, len);
p += len;
- len = (sizeof(struct i40e_q_vector)
- * vsi->num_q_vectors);
- memcpy(p, vsi->q_vectors, len);
- p += len;
-
- len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
- memcpy(p, vsi->tx_rings, len);
- p += len;
- memcpy(p, vsi->rx_rings, len);
- p += len;
+ if (vsi->num_q_vectors) {
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ if (vsi->num_queue_pairs) {
+ len = (sizeof(struct i40e_ring) *
+ vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
p += len;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+ if (vsi->tx_rings[0]) {
+ len = sizeof(struct i40e_tx_buffer);
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+ p += len;
+ }
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
- p += len;
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+ p += len;
+ }
}
/* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].rx_stats.packets,
- vsi->rx_rings[i].rx_stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_rx_page_failed,
+ rx_ring->rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
}
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].tx_stats.packets,
- vsi->tx_rings[i].tx_stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.completed,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
}
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
- if (vsi->q_vectors) {
- for (i = 0; i < vsi->num_q_vectors; i++) {
- dev_info(&pf->pdev->dev,
- " q_vectors[%i]: base index = %ld\n",
- i, ((long int)*vsi->q_vectors[i].rx.ring-
- (long int)*vsi->q_vectors[0].rx.ring)/
- sizeof(struct i40e_ring));
- }
- }
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
u8 *print_buf_start;
u8 *print_buf;
- char *cmd_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count -= bytes_not_copied;
cmd_buf[count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
if (!print_buf_start)
goto command_write_done;
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- u8 ma[6];
- int vlan = 0;
struct i40e_mac_filter *f;
+ int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- u8 ma[6];
int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- int v;
- u16 vid;
i40e_status ret;
+ u16 vid;
+ int v;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data;
- int ret;
u16 packet_len, i, j = 0;
char *asc_packet;
bool add = false;
+ int ret;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ u16 llen, rlen;
int ret, i;
u8 *buff;
- u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- /* Read at least 512 words */
- if (buffer_len == 0)
- buffer_len = 512;
+ /* set the max length */
+ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
bytes = 2 * buffer_len;
+
+ /* read at least 1k bytes, no more than 4kB */
+ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
buff = kzalloc(bytes, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
struct i40e_pf *pf = filp->private_data;
int bytes_not_copied;
struct i40e_vsi *vsi;
+ char *buf_tmp;
int vsi_seid;
int i, cnt;
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count -= bytes_not_copied;
i40e_dbg_netdev_ops_buf[count] = '\0';
+ buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ if (buf_tmp) {
+ *buf_tmp = '\0';
+ count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ }
+
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
goto netdev_ops_write_done;
}
for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i].napi);
+ napi_schedule(&vsi->q_vectors[i]->napi);
dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile __attribute__((unused));
+ struct dentry *pfile;
const char *name = pci_name(pf->pdev);
+ const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (pf->i40e_dbg_pf) {
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_command_fops);
- pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_dump_fops);
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_netdev_ops_fops);
- } else {
- dev_info(&pf->pdev->dev,
- "debugfs entry for %s failed\n", name);
- }
+ if (!pf->i40e_dbg_pf)
+ return;
+
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_info(dev, "debugfs dir/file for %s failed\n", name);
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ return;
}
/**
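In the debugfs hunks above, the hand-rolled copy_from_user() + local buffer + kstrtol() sequence in the "dump" handler is replaced by a single kstrtol_from_user() call. A short kernel-style sketch of that pattern is below; the handler and message names are hypothetical, only kstrtol_from_user() itself is the real API.

/* Hedged sketch: parsing one integer from a debugfs write without a
 * bounce buffer. */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_dbg_write(struct file *filp, const char __user *buffer,
			      size_t count, loff_t *ppos)
{
	long seid;
	int ret;

	if (*ppos != 0)			/* don't allow partial writes */
		return 0;

	/* copy_from_user + NUL-termination + kstrtol in one call */
	ret = kstrtol_from_user(buffer, count, 0, &seid);
	if (ret)
		return ret;

	pr_info("demo: seid %ld\n", seid);
	return count;
}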
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76c..1b86138fa9e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
char *p;
int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+ unsigned int start;
i40e_update_stats(vsi);
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->tx_rings[j].tx_stats.packets;
- data[i++] = vsi->tx_rings[j].tx_stats.bytes;
- }
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->rx_rings[j].rx_stats.packets;
- data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ rcu_read_lock();
+ for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ struct i40e_ring *rx_ring;
+
+ if (!tx_ring)
+ continue;
+
+ /* process Tx ring statistics */
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ data[i] = tx_ring->stats.packets;
+ data[i + 1] = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ /* Rx ring is the 2nd half of the queue pair */
+ rx_ring = &tx_ring[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ data[i + 2] = rx_ring->stats.packets;
+ data[i + 3] = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
}
+ rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
}
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
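The ethtool and stats hunks above (and the i40e_main.c ones that follow) wrap every per-ring packet/byte read in a u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() loop so 64-bit counters stay consistent on 32-bit hosts. A minimal sketch of that reader/writer pairing is below; the demo_ring structure and function names are assumptions, the u64_stats_sync calls are the real API of that kernel era.

/* Hedged sketch: seqcount-protected 64-bit per-ring counters. */
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_ring {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* writer side (ring clean-up path) */
static void demo_ring_account(struct demo_ring *ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->packets += pkts;
	ring->bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}

/* reader side (ethtool stats / ndo_get_stats64) */
static void demo_ring_read(struct demo_ring *ring, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		*pkts = ring->packets;
		*bytes = ring->bytes;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}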
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017..be15938ba213 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
**/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u64 bytes, packets;
+ unsigned int start;
+
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
- *storage = *i40e_get_vsi_stats_struct(vsi);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ packets = tx_ring->stats.packets;
+ bytes = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ rx_ring = &tx_ring[1];
- return storage;
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ packets = rx_ring->stats.packets;
+ bytes = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ rcu_read_unlock();
+
+ /* following stats updated by the watchdog task */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+ return stats;
}
/**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i].rx_stats, 0 ,
- sizeof(vsi->rx_rings[i].rx_stats));
- memset(&vsi->tx_rings[i].tx_stats, 0,
- sizeof(vsi->tx_rings[i].tx_stats));
+ memset(&vsi->rx_rings[i]->stats, 0 ,
+ sizeof(vsi->rx_rings[i]->stats));
+ memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ sizeof(vsi->rx_rings[i]->rx_stats));
+ memset(&vsi->tx_rings[i]->stats, 0 ,
+ sizeof(vsi->tx_rings[i]->stats));
+ memset(&vsi->tx_rings[i]->tx_stats, 0,
+ sizeof(vsi->tx_rings[i]->tx_stats));
}
vsi->stat_offsets_loaded = false;
}
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
}
}
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc;
if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = 0;
rx_page = 0;
rx_buf = 0;
+ rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p;
+ u64 bytes, packets;
+ unsigned int start;
- p = &vsi->rx_rings[q];
- rx_b += p->rx_stats.bytes;
- rx_p += p->rx_stats.packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ /* locate Tx ring */
+ p = ACCESS_ONCE(vsi->tx_rings[q]);
- p = &vsi->tx_rings[q];
- tx_b += p->tx_stats.bytes;
- tx_p += p->tx_stats.packets;
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
+
+ /* Rx queue is part of the same block as Tx queue */
+ p = &p[1];
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ rx_b += bytes;
+ rx_p += packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
}
+ rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err;
}
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i].desc)
- i40e_free_tx_resources(&vsi->tx_rings[i]);
+ if (vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err;
}
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i].desc)
- i40e_free_rx_resources(&vsi->rx_rings[i]);
+ if (vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* Now associate this queue with this PCI function */
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
- err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err;
}
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err;
}
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *rx_ring = vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
*/
qp = vsi->base_queue;
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
- struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- i40e_flush(hw);
+ /* skip the flush */
}
/**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int vector, err;
for (vector = 0; vector < q_vectors; vector++) {
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
- if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring[0]) {
+ } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring[0]) {
+ } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
}
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
i40e_irq_dynamic_enable_icr0(pf);
}
+ i40e_flush(&pf->hw);
return 0;
}
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0 = rd32(hw, I40E_PFINT_ICR0);
- /* if sharing a legacy IRQ, we might get called w/o an intr pending */
- if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
-
val = rd32(hw, I40E_PFINT_DYN_CTL0);
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
/**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
* @vsi: the VSI being configured
* @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
**/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
- rx_ring->q_vector = q_vector;
- q_vector->rx.ring[q_vector->rx.count] = rx_ring;
- q_vector->rx.count++;
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
tx_ring->q_vector = q_vector;
- q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- q_vector->num_ringpairs++;
- q_vector->vsi = vsi;
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
}
/**
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
int qp_remaining = vsi->num_queue_pairs;
int q_vectors = vsi->num_q_vectors;
- int qp_per_vector;
+ int num_ringpairs;
int v_start = 0;
int qp_idx = 0;
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
* group them so there are multiple queues per vector.
*/
for (; v_start < q_vectors && qp_remaining; v_start++) {
- qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
- for (; qp_per_vector;
- qp_per_vector--, qp_idx++, qp_remaining--) {
- map_vector_to_rxq(vsi, v_start, qp_idx);
- map_vector_to_txq(vsi, v_start, qp_idx);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+ num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+ q_vector->num_ringpairs = num_ringpairs;
+
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+
+ while (num_ringpairs--) {
+ map_vector_to_qp(vsi, v_start, qp_idx);
+ qp_idx++;
+ qp_remaining--;
}
}
}
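
[Editor's note] map_vector_to_qp() above threads each ring onto its vector through the new ring->next pointer instead of storing it in a fixed-size array. A hedged sketch of that list handling follows; only ->next, ->ring and ->count correspond to fields the patch adds, the rest is illustrative.

/* per-vector ring list, as built by the new mapping code */
struct demo_ring {
	struct demo_ring *next;		/* next ring owned by the vector */
	int queue_index;
};

struct demo_ring_container {
	struct demo_ring *ring;		/* head of the list */
	u16 count;
};

/* push a ring onto the container, exactly as map_vector_to_qp() does */
static void demo_add_ring(struct demo_ring_container *rc, struct demo_ring *r)
{
	r->next = rc->ring;
	rc->ring = r;
	rc->count++;
}

/* walking the list; the driver wraps this loop in i40e_for_each_ring() */
static int demo_sum_queue_indices(struct demo_ring_container *rc)
{
	struct demo_ring *r;
	int sum = 0;

	for (r = rc->ring; r != NULL; r = r->next)
		sum += r->queue_index;
	return sum;
}
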
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
- i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i].num_ringpairs == 0)
+ if (vsi->q_vectors[i]->num_ringpairs == 0)
continue;
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
free_irq(pf->msix_entries[vector].vector,
- &vsi->q_vectors[i]);
+ vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -3164,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
}
/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *ring;
+
+ if (!q_vector)
+ return;
+
+ /* disassociate q_vector from rings */
+ i40e_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ vsi->q_vectors[v_idx] = NULL;
+
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI being un-configured
*
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
- struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
- int r_idx;
-
- if (!q_vector)
- continue;
-
- /* disassociate q_vector from rings */
- for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
- q_vector->tx.ring[r_idx]->q_vector = NULL;
- for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
- q_vector->rx.ring[r_idx]->q_vector = NULL;
-
- /* only VSI w/ an associated netdev is set up w/ NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
- }
- kfree(vsi->q_vectors);
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ i40e_free_q_vector(vsi, v_idx);
}
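
[Editor's note] i40e_free_q_vector() above ends in kfree_rcu() because the stats paths earlier in this patch walk the q_vector and ring pointers under rcu_read_lock(). A minimal sketch of that publish/read/defer-free pattern, with illustrative names (the driver uses ACCESS_ONCE() rather than rcu_dereference() for its reads):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* illustrative object freed the same way the patch frees q_vectors/rings */
struct demo_obj {
	int value;
	struct rcu_head rcu;	/* required by kfree_rcu() */
};

static struct demo_obj __rcu *demo_ptr;

/* reader: safe against a concurrent free */
static int demo_read(void)
{
	struct demo_obj *obj;
	int val = 0;

	rcu_read_lock();
	obj = rcu_dereference(demo_ptr);
	if (obj)
		val = obj->value;
	rcu_read_unlock();
	return val;
}

/* writer: unpublish first, then defer the kfree past the grace period */
static void demo_free(void)
{
	struct demo_obj *obj = rcu_dereference_protected(demo_ptr, 1);

	RCU_INIT_POINTER(demo_ptr, NULL);
	if (obj)
		kfree_rcu(obj, rcu);
}
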
/**
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx].napi);
+ napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx].napi);
+ napi_disable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
+ netdev_info(vsi->netdev, "NIC Link is Up\n");
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ } else if (vsi->netdev) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
}
i40e_service_event_schedule(pf);
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_clean_tx_ring(&vsi->tx_rings[i]);
- i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
}
}
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(&vsi->tx_rings[i]);
+ set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i].state))
+ &vsi->tx_rings[i]->state))
armed++;
}
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED)))
return;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
+ int sz_vectors;
+ int sz_rings;
int vsi_idx;
int i;
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
- goto err_alloc_vsi; /* out of VSI slots! */
+ goto unlock_pf; /* out of VSI slots! */
}
pf->next_vsi = ++i;
vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
if (!vsi) {
ret = -ENOMEM;
- goto err_alloc_vsi;
+ goto unlock_pf;
}
vsi->type = type;
vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
- i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* allocate memory for ring pointers */
+ sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ goto err_rings;
+ }
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ /* allocate memory for q_vector pointers */
+ sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
-err_alloc_vsi:
+ goto unlock_pf;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+err_rings:
+ pf->next_vsi = i - 1;
+ kfree(vsi);
+unlock_pf:
mutex_unlock(&pf->switch_mutex);
return ret;
}
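
[Editor's note] In i40e_vsi_mem_alloc() above, the Tx and Rx ring pointer arrays share one allocation: 2 * alloc_queue_pairs pointers are allocated behind tx_rings and rx_rings simply aliases the second half, so the single kfree(vsi->tx_rings) in i40e_vsi_clear() releases both. A hedged sketch under illustrative names:

#include <linux/slab.h>

struct demo_ring;

struct demo_vsi {
	int alloc_queue_pairs;
	struct demo_ring **tx_rings;	/* entries [0 .. N-1]  */
	struct demo_ring **rx_rings;	/* entries [N .. 2N-1] */
};

static int demo_alloc_ring_ptrs(struct demo_vsi *vsi)
{
	size_t sz = sizeof(struct demo_ring *) * vsi->alloc_queue_pairs * 2;

	vsi->tx_rings = kzalloc(sz, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	/* Rx pointers live in the second half of the same block */
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
	return 0;
}
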
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ /* free the ring and vector containers */
+ kfree(vsi->q_vectors);
+ kfree(vsi->tx_rings);
+
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@ free_vsi:
}
/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings[0])
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ vsi->rx_rings[i] = NULL;
+ }
+
+ return 0;
+}
+
+/**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int ret = 0;
int i;
- vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->rx_rings) {
- ret = -ENOMEM;
- goto err_alloc_rings;
- }
-
- vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
- kfree(vsi->rx_rings);
- goto err_alloc_rings;
- }
-
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ vsi->tx_rings[i] = tx_ring;
+ rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
- }
-
-err_alloc_rings:
- return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
- if (vsi) {
- kfree(vsi->rx_rings);
- kfree(vsi->tx_rings);
+ vsi->rx_rings[i] = rx_ring;
}
return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
}
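
[Editor's note] i40e_alloc_rings() above allocates each queue pair as one block of two struct i40e_ring, Tx first, which is why rx_ring = &tx_ring[1] here and p = &p[1] in the stats code are valid, and why i40e_vsi_clear_rings() frees only the Tx pointer. A minimal sketch of that layout, with illustrative names:

#include <linux/slab.h>

struct demo_ring {
	int queue_index;
	bool is_rx;
	struct rcu_head rcu;	/* head lives in the Tx ring, the block start */
};

/* allocate a Tx/Rx pair as a single block, as i40e_alloc_rings() does */
static int demo_alloc_pair(struct demo_ring **tx_out, struct demo_ring **rx_out,
			   int idx)
{
	struct demo_ring *tx;

	tx = kzalloc(sizeof(struct demo_ring) * 2, GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	tx->queue_index = idx;
	*tx_out = tx;

	/* the Rx ring is simply the second element of the same allocation */
	*rx_out = &tx[1];
	(*rx_out)->queue_index = idx;
	(*rx_out)->is_rx = true;
	return 0;
}

/* freeing mirrors i40e_vsi_clear_rings(): the Tx pointer covers both rings */
static void demo_free_pair(struct demo_ring *tx)
{
	kfree_rcu(tx, rcu);
}
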
/**
@@ -5249,6 +5356,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi,
+ i40e_napi_poll, vsi->work_limit);
+
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
* i40e_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
+ int err;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- vsi->q_vectors = kcalloc(num_q_vectors,
- sizeof(struct i40e_q_vector),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- return -ENOMEM;
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- vsi->q_vectors[v_idx].vsi = vsi;
- vsi->q_vectors[v_idx].v_idx = v_idx;
- cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
- i40e_napi_poll, vsi->work_limit);
+ err = i40e_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
}
return 0;
+
+err_out:
+ while (v_idx--)
+ i40e_free_q_vector(vsi, v_idx);
+
+ return err;
}
/**
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
I40E_FLAG_MQ_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
- dev_info(&pf->pdev->dev,
- "MSI init failed (%d), trying legacy.\n", err);
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
}
+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
int ret = -ENOENT;
struct i40e_pf *pf = vsi->back;
- if (vsi->q_vectors) {
+ if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid);
return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
- vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
- vsi->num_q_vectors, vsi->idx);
+ if (vsi->num_q_vectors)
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
"failed to get q tracking for VSI %d, err=%d\n",
@@ -7062,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
pf->vsi = kzalloc(len, GFP_KERNEL);
- if (!pf->vsi)
+ if (!pf->vsi) {
+ err = -ENOMEM;
goto err_switch_setup;
+ }
err = i40e_setup_pf_switch(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0cc..f1f03bc5c729 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
+ unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
if (!vsi)
return -ENOENT;
- tx_ring = &vsi->tx_rings[0];
+ tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
- << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
- & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+ fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
- << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
- & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+ fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
- << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
- & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+ fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK;
/* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0)
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((fdir_data->dest_vsi
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
- & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+ fpt |= ((u32)fdir_data->dest_vsi <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
- fdir_desc->dtype_cmd_cntindex =
- cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+ dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
- << I40E_TXD_FLTR_QW1_DEST_SHIFT)
- & I40E_TXD_FLTR_QW1_DEST_MASK);
+ dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
- & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+ dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
if (fdir_data->cnt_index != 0) {
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32((fdir_data->cnt_index
- << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
- & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dcc |= ((u32)fdir_data->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */
- tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
tx_desc->buffer_addr = cpu_to_le64(dma);
- td_cmd = I40E_TX_DESC_CMD_EOP |
- I40E_TX_DESC_CMD_RS |
- I40E_TX_DESC_CMD_DUMMY;
+ td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
- /* Mark the data descriptor to be watched */
- tx_buf->next_to_watch = tx_desc;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
*/
wmb();
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
writel(tx_ring->next_to_use, tx_ring->tail);
return 0;
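
[Editor's note] The rewrite of i40e_program_fdir_filter() above accumulates the descriptor words in plain u32 locals (fpt, dcc) and converts to little-endian once, instead of repeatedly OR-ing cpu_to_le32() results into the __le32 field. A tiny hedged sketch of that accumulate-then-convert pattern; the shifts, masks and names are illustrative, not the hardware's:

#include <linux/types.h>

#define DEMO_QINDEX_SHIFT	0
#define DEMO_QINDEX_MASK	0x7ffU
#define DEMO_FLEXOFF_SHIFT	11
#define DEMO_FLEXOFF_MASK	(0x7U << DEMO_FLEXOFF_SHIFT)

struct demo_filter_desc {
	__le32 qindex_flex_ptype_vsi;
};

/* build the word in host order, convert to LE exactly once at the end */
static void demo_program_desc(struct demo_filter_desc *desc,
			      u16 q_index, u8 flex_off)
{
	u32 fpt;

	fpt = ((u32)q_index << DEMO_QINDEX_SHIFT) & DEMO_QINDEX_MASK;
	fpt |= ((u32)flex_off << DEMO_FLEXOFF_SHIFT) & DEMO_FLEXOFF_MASK;

	desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
}
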
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
}
/**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
{
- if (tx_buffer->dma) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
- dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
- else
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer->dma = 0;
- tx_buffer->time_stamp = 0;
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
/**
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
**/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
- struct i40e_tx_buffer *tx_buffer;
unsigned long bi_size;
u16 i;
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer->skb)
- dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
- }
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
}
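
[Editor's note] The netdev_tx_reset_queue() call added just above is one of three byte queue limit (BQL) hooks this patch wires up; the matching netdev_tx_sent_queue() and netdev_tx_completed_queue() calls appear later in the xmit and clean paths. A hedged sketch of how the three calls pair up; the demo_* wrappers are illustrative, the netdev_tx_*_queue() calls are the in-kernel API the hunks use:

#include <linux/netdevice.h>

static void demo_xmit_account(struct net_device *dev, u16 qidx,
			      unsigned int bytes)
{
	/* transmit path: account bytes handed to the hardware */
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, qidx), bytes);
}

static void demo_clean_account(struct net_device *dev, u16 qidx,
			       unsigned int pkts, unsigned int bytes)
{
	/* completion path: report what the hardware finished */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, qidx), pkts, bytes);
}

static void demo_ring_teardown(struct net_device *dev, u16 qidx)
{
	/* ring cleanup: both counters must start from zero again */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qidx));
}
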
/**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
- for (; budget; budget--) {
- struct i40e_tx_desc *eop_desc;
-
- eop_desc = tx_buf->next_to_watch;
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* count the packet as being completed */
- tx_ring->tx_stats.completed++;
+ /* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;
- tx_buf->time_stamp = 0;
-
- /* set memory barrier before eop_desc is verified */
- rmb();
- do {
- i40e_unmap_tx_resource(tx_ring, tx_buf);
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
- /* clear dtype status */
- tx_desc->cmd_type_offset_bsz &=
- ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
- if (likely(tx_desc == eop_desc)) {
- eop_desc = NULL;
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
- total_bytes += tx_buf->bytecount;
- total_packets += tx_buf->gso_segs;
- }
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
tx_buf++;
tx_desc++;
i++;
- if (unlikely(i == tx_ring->count)) {
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
- } while (eop_desc);
- }
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
return true;
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
i40e_set_new_dynamic_itr(&q_vector->tx);
if (old_itr != q_vector->tx.itr)
wr32(hw, reg_addr, q_vector->tx.itr);
-
- i40e_flush(hw);
}
/**
@@ -1042,8 +1064,10 @@ next_desc:
}
rx_ring->next_to_clean = i;
- rx_ring->rx_stats.packets += total_rx_packets;
- rx_ring->rx_stats.bytes += total_rx_bytes;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
bool clean_complete = true;
int budget_per_ring;
- int i;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
return 0;
}
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
- * Since the actual Tx work is minimal, we can give the Tx a larger
- * budget and be more aggressive about cleaning up the Tx descriptors.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- for (i = 0; i < q_vector->num_ringpairs; i++) {
- clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
- vsi->work_limit);
- clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
- budget_per_ring);
- }
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
}
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th;
unsigned int hlen;
u32 flex_ptype, dtype_cmd;
+ u16 i;
/* make sure ATR is enabled */
if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->atr_count = 0;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
@@ -1276,27 +1303,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
}
/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring: ptr to the ring to send
- * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
-{
- if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
- !(tx_flags & I40E_TX_FLAGS_TXSW)) {
- if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
- return false;
- }
-
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-/**
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
const u32 cd_tunneling, const u32 cd_l2tag2)
{
struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
- context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* cpu_to_le32 and assign to struct fields */
context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct device *dev = tx_ring->dev;
- u32 paylen = skb->len - hdr_len;
- u16 i = tx_ring->next_to_use;
+ struct skb_frag_struct *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
- u32 buf_offset = 0;
+ u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
-
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT;
}
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
tx_desc = I40E_TX_DESC(tx_ring, i);
- for (;;) {
- while (size > I40E_MAX_DATA_PER_TXD) {
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
I40E_MAX_DATA_PER_TXD, td_tag);
- buf_offset += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
-
tx_desc++;
i++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
}
- }
- tx_bi = &tx_ring->tx_bi[i];
- tx_bi->length = buf_offset + size;
- tx_bi->tx_flags = tx_flags;
- tx_bi->dma = dma;
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
if (likely(!data_len))
break;
- size = skb_frag_size(frag);
- data_len -= size;
- buf_offset = 0;
- tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- frag++;
- }
-
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ size = skb_frag_size(frag);
+ data_len -= size;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
- tx_ring->next_to_use = i;
+ tx_bi = &tx_ring->tx_bi[i];
+ }
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
- /* multiply data chunks by size of headers */
- tx_bi->bytecount = paylen + (gso_segs * hdr_len);
- tx_bi->gso_segs = gso_segs;
- tx_bi->skb = skb;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
- /* set the timestamp and next to watch values */
+ /* set the timestamp */
first->time_stamp = jiffies;
- first->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
*/
wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
writel(i, tx_ring->tail);
+
return;
dma_error:
- dev_info(dev, "TX DMA map failed\n");
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_bi map */
for (;;) {
tx_bi = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_bi);
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first)
break;
if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
i--;
}
- dev_kfree_skb_any(skb);
-
tx_ring->next_to_use = i;
}
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
/* Always offload the checksum, since it's in the data descriptor */
- if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- /* always enable offload insertion */
- td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
- if (tx_flags & I40E_TX_FLAGS_CSUM)
i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
+ }
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722d98a7..db55d9947f15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,23 +102,20 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u32 tx_flags;
struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
unsigned int bytecount;
- u16 gso_segs;
- u8 mapped_as_page;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
unsigned int page_offset;
};
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
u64 packets;
u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
struct i40e_rx_queue_stats {
- u64 packets;
- u64 bytes;
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
/* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
union {
struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
};
struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
/* array of pointers to rings */
- struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
u16 itr;
};
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e2408..07596982a477 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ (pf->hw.func_caps.num_msix_vectors_vf
* vf->vf_id) + (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
& I40E_QTX_CTL_PF_INDX_MASK);
qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
<< I40E_QTX_CTL_VFVM_INDX_SHIFT)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b4235..8c2437722aad 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -28,14 +28,14 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06a6c68..2e166b22d52b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -562,11 +562,11 @@ struct e1000_hw {
u8 revision_id;
};
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
/* These functions must be implemented by drivers */
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b7ea99..2d913716573a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -28,26 +28,24 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc608..e4cbe8ef67b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e7266759a10b..c4c4fe332c7e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
goto out;
}
- if (phy->type == e1000_phy_i210) {
- ret_val = igb_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
- }
out:
return ret_val;
@@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
return ret_val;
}
+ ret_val = igb_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b098edae..5e9ed89403aa 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -483,40 +483,38 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ struct sk_buff *skb);
static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
#ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 151e00cad113..b0f3666b1d7f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -146,6 +146,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
@@ -169,13 +170,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
} else {
ecmd->supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
SUPPORTED_Autoneg |
SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_FIBRE;
-
- if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ ecmd->advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
+ if (hw->mac.type == e1000_i354) {
+ if ((hw->device_id ==
+ E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
+ ecmd->supported &=
+ ~SUPPORTED_1000baseKX_Full;
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
+ ecmd->advertising &=
+ ~ADVERTISED_1000baseKX_Full;
+ }
}
if (eth_flags->e100_base_fx) {
ecmd->supported |= SUPPORTED_100baseT_Full;
@@ -187,35 +197,29 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
}
-
if (hw->mac.autoneg != 1)
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- if (hw->fc.requested_mode == e1000_fc_full)
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
ecmd->advertising |= ADVERTISED_Pause;
- else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ break;
+ case e1000_fc_rx_pause:
ecmd->advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ break;
+ case e1000_fc_tx_pause:
ecmd->advertising |= ADVERTISED_Asym_Pause;
- else
+ break;
+ default:
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
-
- status = rd32(E1000_STATUS);
-
+ }
if (status & E1000_STATUS_LU) {
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported = SUPPORTED_2500baseX_Full;
- ecmd->advertising = ADVERTISED_2500baseX_Full;
- ecmd->speed = SPEED_2500;
- } else {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
- }
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
ecmd->speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
@@ -232,7 +236,6 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->speed = -1;
ecmd->duplex = -1;
}
-
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
@@ -771,8 +774,10 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
+ if ((hw->mac.type >= e1000_i210) &&
+ !igb_get_flash_presence_i210(hw)) {
return -EOPNOTSUPP;
+ }
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -1659,7 +1664,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1731,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
@@ -2877,6 +2884,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
return 0;
}
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int max_combined = 0;
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ max_combined = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_combined = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ case e1000_i354:
+ default:
+ max_combined = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igb_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ if (count > igb_max_channels(adapter))
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igb_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2913,6 +3002,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh_indir = igb_get_rxfh_indir,
.set_rxfh_indir = igb_set_rxfh_indir,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2a8ccd..ebe6370c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
#ifdef CONFIG_PM
@@ -2429,7 +2430,7 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int  igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2471,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries) {
+ if (!adapter->msix_entries || num_vfs > 7) {
err = -EPERM;
goto out;
}
-
if (!num_vfs)
goto out;
- else if (old_vfs && old_vfs == num_vfs)
- goto out;
- else if (old_vfs && old_vfs != num_vfs)
- err = igb_disable_sriov(pdev);
-
- if (err)
- goto out;
-
- if (num_vfs > 7) {
- err = -EPERM;
- goto out;
- }
- adapter->vfs_allocated_count = num_vfs;
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+ old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
+ } else
+ adapter->vfs_allocated_count = num_vfs;
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2497,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
-
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs) {
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2623,7 +2618,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_pci_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -5708,7 +5703,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, 6);
+ memcpy(addr, vf_mac, ETH_ALEN);
igb_write_mbx(hw, msgbuf, 3, vf);
}
@@ -7838,4 +7833,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_SUCCESS;
}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
+}
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3d14c0..7d6a25c8f889 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee06d3e..9fadbb28cf08 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+ max_frame > MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- }
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2699,7 +2698,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ei->get_variants) {
err = ei->get_variants(adapter);
if (err)
- goto err_ioremap;
+ goto err_get_variants;
}
/* setup adapter struct */
@@ -2796,6 +2795,7 @@ err_hw_init:
kfree(adapter->rx_ring);
err_sw_init:
igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10ce12f..955ad8c2c534 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
- memcpy(hw->mac.perm_addr, addr, 6);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else
ret_val = -E1000_ERR_MAC_INIT;
}
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97ff1b3..2224cc2edf13 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -187,21 +187,21 @@ enum ixgb_state_t {
};
/* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35c33aa..0bd5d72e1af5 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
};
/* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
- u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, u32 pad);
/* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c6e4e..f38fc0a343a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
#include <net/busy_poll.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
@@ -67,7 +67,11 @@
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
+#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -187,11 +191,11 @@ struct ixgbe_rx_buffer {
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
u64 yields;
u64 misses;
u64 cleaned;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -219,6 +223,15 @@ enum ixgbe_ring_state_t {
__IXGBE_RX_FCOE,
};
+struct ixgbe_fwd_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct net_device *netdev;
+ struct ixgbe_adapter *real_adapter;
+ unsigned int tx_base_queue;
+ unsigned int rx_base_queue;
+ int pool;
+};
+
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
@@ -236,6 +249,7 @@ struct ixgbe_ring {
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
+ struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -293,6 +307,12 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS 31
+#define IXGBE_MAX_DCBMACVLANS 8
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -369,11 +389,13 @@ struct ixgbe_q_vector {
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
@@ -394,18 +416,18 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = true;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
} else
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
- spin_unlock(&q_vector->lock);
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -413,14 +435,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = false;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
IXGBE_QV_STATE_NAPI_YIELD));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
- spin_unlock(&q_vector->lock);
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -432,7 +455,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
if ((q_vector->state & IXGBE_QV_LOCKED)) {
q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
} else
@@ -451,17 +474,32 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
- WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+ WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBE_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+
+ return rc;
+}
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -487,10 +525,16 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
return false;
}
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ return true;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
@@ -738,6 +782,7 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
+ unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
struct ixgbe_fdir_filter {
@@ -786,93 +831,89 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
- struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +925,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -906,13 +947,16 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+ int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649abf97c0..4e7c9b098b58 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1129
+#define IXGBE_REGS_LEN 1139
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
/* DCB */
- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
- for (i = 0; i < 8; i++)
- regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
- for (i = 0; i < 8; i++)
- regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
- for (i = 0; i < 8; i++)
- regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < 8; i++)
- regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ regs_buff[865 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
for (i = 0; i < 8; i++)
- regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+ regs_buff[873 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
/* Statistics */
regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* 82599 X540 specific registers */
regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* 82599 X540 specific DCB registers */
+ regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < 4; i++)
+ regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+ regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+ /* same as RTTQCNRM */
+ regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+ /* same as RTTQCNRR */
+
+ /* X540 specific DCB registers */
+ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+ regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_misses", i);
+ sprintf(p, "tx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_cleaned", i);
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_misses", i);
+ sprintf(p, "rx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_cleaned", i);
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -2212,13 +2257,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
#if IS_ENABLED(CONFIG_BQL)
/* detect ITR changes that require update of TXDCTL.WTHRESH */
- if ((adapter->tx_itr_setting > 1) &&
+ if ((adapter->tx_itr_setting != 1) &&
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
if ((tx_itr_prev == 1) ||
- (tx_itr_prev > IXGBE_100K_ITR))
+ (tx_itr_prev >= IXGBE_100K_ITR))
need_reset = true;
} else {
- if ((tx_itr_prev > 1) &&
+ if ((tx_itr_prev != 1) &&
(tx_itr_prev < IXGBE_100K_ITR))
need_reset = true;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e1089ecc..32e3eaaa160a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
+ bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4)) {
+ if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ txr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ rxr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef53..bd8f5239dfe6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -132,7 +133,7 @@ static struct notifier_block dca_notifier = {
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
- "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
+ "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
static unsigned int allow_unsupported_sfp;
@@ -153,7 +154,6 @@ MODULE_VERSION(DRV_VERSION);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
- int pos = 0;
struct pci_dev *parent_dev;
struct pci_bus *parent_bus;
@@ -165,11 +165,10 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
if (!parent_dev)
return -1;
- pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
- if (!pos)
+ if (!pci_is_pcie(parent_dev))
return -1;
- pci_read_config_word(parent_dev, pos + reg, value);
+ pcie_capability_read_word(parent_dev, reg, value);
return 0;
}
@@ -247,7 +246,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
max_gts = 4 * width;
break;
case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding only reduces throughput by 1% */
+ /* 128b/130b encoding reduces throughput by less than 2% */
max_gts = 8 * width;
break;
default:
@@ -265,7 +264,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
width,
(speed == PCIE_SPEED_2_5GT ? "20%" :
speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "N/a" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
"Unknown"));
if (max_gts < expected_gts) {
@@ -872,11 +871,18 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+ u32 head, tail;
- u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+ if (ring->l2_accel_priv)
+ adapter = ring->l2_accel_priv->real_adapter;
+ else
+ adapter = netdev_priv(ring->netdev);
+
+ hw = &adapter->hw;
+ head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
if (head != tail)
return (head < tail) ?
@@ -1585,7 +1591,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (ixgbe_qv_ll_polling(q_vector))
+ if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2103,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
ixgbe_for_each_ring(ring, q_vector->rx) {
found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
if (found)
ring->stats.cleaned += found;
else
@@ -3005,7 +3011,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
- netif_set_xps_queue(adapter->netdev,
+ netif_set_xps_queue(ring->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
@@ -3395,7 +3401,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- int p;
+ u16 pool;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3412,9 +3418,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1 << 29;
- for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
- psrtype);
+ for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -3683,7 +3688,11 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3713,7 +3722,11 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3750,7 +3763,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV mode significantly less RAR entries are available */
+ /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
@@ -3825,14 +3838,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
- } else {
- /*
- * Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
}
ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = false;
@@ -3849,6 +3854,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
@@ -3893,15 +3905,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- local_bh_disable(); /* for ixgbe_qv_lock_napi() */
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+ while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
pr_info("QV %d locked\n", q_idx);
- mdelay(1);
+ usleep_range(1000, 20000);
}
}
- local_bh_enable();
}
#ifdef CONFIG_IXGBE_DCB
@@ -4118,6 +4128,228 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
+static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
+ struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /* No unicast promiscuous support for VMDQ devices. */
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+ vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+
+ /* clear the affected bit */
+ vmolr &= ~IXGBE_VMOLR_MPE;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ hw->mac.ops.update_mc_addr_list(hw, dev);
+ }
+ ixgbe_write_uc_addr_list(adapter->netdev);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+}
+
+static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 pool)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int entry;
+
+ entry = hw->mac.num_rar_entries - pool;
+ hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
+}
+
+static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int rss_i = adapter->num_rx_queues_per_pool;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 pool = vadapter->pool;
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
+}
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+ if (IXGBE_CB(skb)->page_released) {
+ dma_unmap_page(dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ }
+ dev_kfree_skb(skb);
+ }
+ rx_buffer->skb = NULL;
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ __free_pages(rx_buffer->page,
+ ixgbe_rx_pg_order(rx_ring));
+ rx_buffer->page = NULL;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
+ struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int index = rx_ring->queue_index + vadapter->rx_base_queue;
+
+ /* shutdown specific queue receive and wait for dma to settle */
+ ixgbe_disable_rx_queue(adapter, rx_ring);
+ usleep_range(10000, 20000);
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_clean_rx_ring(rx_ring);
+ rx_ring->l2_accel_priv = NULL;
+}
+
+int ixgbe_fwd_ring_down(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int txbase = accel->tx_base_queue;
+ int i;
+
+ netif_tx_stop_all_queues(vdev);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+ adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
+ }
+
+
+ return 0;
+}
+
+static int ixgbe_fwd_ring_up(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase, txbase, queues;
+ int i, baseq, err = 0;
+
+ if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ return 0;
+
+ baseq = accel->pool * adapter->num_rx_queues_per_pool;
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ accel->pool, adapter->num_rx_pools,
+ baseq, baseq + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+
+ accel->netdev = vdev;
+ accel->rx_base_queue = rxbase = baseq;
+ accel->tx_base_queue = txbase = baseq;
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->rx_ring[rxbase + i]->netdev = vdev;
+ adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->netdev = vdev;
+ adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ }
+
+ queues = min_t(unsigned int,
+ adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
+ err = netif_set_real_num_tx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ err = netif_set_real_num_rx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ if (is_valid_ether_addr(vdev->dev_addr))
+ ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+
+ ixgbe_fwd_psrtype(accel);
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return err;
+fwd_queue_err:
+ ixgbe_fwd_ring_down(vdev, accel);
+ return err;
+}
+
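+/* re-enable forwarding for any macvlan upper devices that already own a pool */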
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (dfwd->fwd_priv) {
+ err = ixgbe_fwd_ring_up(upper, vadapter);
+ if (err)
+ continue;
+ }
+ }
+ }
+}
+
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -4169,6 +4401,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
+ ixgbe_configure_dfwd(adapter);
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -4322,6 +4555,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
int err;
u32 ctrl_ext;
@@ -4365,6 +4600,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
+ /* enable any upper devices */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_start_all_queues(upper);
+ }
+ }
+
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4456,59 +4701,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->skb) {
- struct sk_buff *skb = rx_buffer->skb;
- if (IXGBE_CB(skb)->page_released) {
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- }
- dev_kfree_skb(skb);
- }
- rx_buffer->skb = NULL;
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_pages(rx_buffer->page,
- ixgbe_rx_pg_order(rx_ring));
- rx_buffer->page = NULL;
- }
-
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-}
-
-/**
* ixgbe_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
@@ -4585,6 +4777,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 rxctrl;
int i;
@@ -4608,6 +4802,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ /* disable any upper devices */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv) {
+ netif_tx_stop_all_queues(upper);
+ netif_carrier_off(upper);
+ netif_tx_disable(upper);
+ }
+ }
+ }
+
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -4816,11 +5023,20 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
+ if (max_vfs > 0)
+ e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
+
/* assign number of SR-IOV VFs */
- if (hw->mac.type != ixgbe_mac_82598EB)
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (max_vfs > 63) {
+ adapter->num_vfs = 0;
+ e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
+ } else {
+ adapter->num_vfs = max_vfs;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
-#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4838,6 +5054,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -EIO;
}
+ /* PF holds first pool slot */
+ set_bit(0, &adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -5143,7 +5361,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
+ int err, queues;
/* disallow open during test */
if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -5168,16 +5386,21 @@ static int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_tx_queues);
+ if (adapter->num_rx_pools > 1)
+ queues = adapter->num_rx_queues_per_pool;
+ else
+ queues = adapter->num_tx_queues;
+
+ err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_rx_queues);
+ if (adapter->num_rx_pools > 1 &&
+ adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
+ queues = IXGBE_MAX_L2A_QUEUES;
+ else
+ queues = adapter->num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6767,8 +6990,9 @@ out_drop:
return NETDEV_TX_OK;
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct ixgbe_ring *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
@@ -6784,10 +7008,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
skb_set_tail_pointer(skb, 17);
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
+ tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ return __ixgbe_xmit_frame(skb, netdev, NULL);
+}
+
/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -7044,6 +7275,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
@@ -7051,6 +7283,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
tc < MAX_TRAFFIC_CLASS))
return -EINVAL;
+ pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
+ if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
+ return -EBUSY;
+
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -7305,6 +7541,104 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
}
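+/* ndo_dfwd_add_station: allocate a VMDq pool and ring set for a newly offloaded macvlan */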
+static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = NULL;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ unsigned int limit;
+ int pool, err;
+
+#ifdef CONFIG_RPS
+ if (vdev->num_rx_queues != vdev->num_tx_queues) {
+ netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
+ vdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+#endif
+ /* Check for hardware restriction on number of rx/tx queues */
+ if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
+ vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
+ netdev_info(pdev,
+ "%s: Supports RX/TX Queue counts 1,2, and 4\n",
+ pdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
+ return ERR_PTR(-EBUSY);
+
+ fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
+ if (!fwd_adapter)
+ return ERR_PTR(-ENOMEM);
+
+ pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
+ adapter->num_rx_pools++;
+ set_bit(pool, &adapter->fwd_bitmask);
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
+
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ if (err)
+ goto fwd_add_err;
+ fwd_adapter->pool = pool;
+ fwd_adapter->real_adapter = adapter;
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ return fwd_adapter;
+fwd_add_err:
+ /* unwind counter and free adapter struct */
+ netdev_info(pdev,
+ "%s: dfwd hardware acceleration failed\n", vdev->name);
+ clear_bit(pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+ kfree(fwd_adapter);
+ return ERR_PTR(err);
+}
+
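+/* ndo_dfwd_del_station: release the pool owned by an offloaded macvlan and reclaim its rings */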
+static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = priv;
+ struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
+ unsigned int limit;
+
+ clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+ ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ fwd_adapter->pool, adapter->num_rx_pools,
+ fwd_adapter->rx_base_queue,
+ fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+ kfree(fwd_adapter);
+}
+
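+/* ndo_dfwd_start_xmit: transmit on the Tx ring reserved for the macvlan's pool */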
+static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ void *priv)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = priv;
+ unsigned int queue;
+ struct ixgbe_ring *tx_ring;
+
+ queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
+ tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
+
+ return __ixgbe_xmit_frame(skb, dev, tx_ring);
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -7349,6 +7683,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
+ .ndo_dfwd_add_station = ixgbe_fwd_add,
+ .ndo_dfwd_del_station = ixgbe_fwd_del,
+ .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
};
/**
@@ -7362,19 +7699,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
**/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *entry;
int physfns = 0;
- /* Some cards can not use the generic count PCIe functions method, and
- * so must be hardcoded to the correct value.
+ /* Some cards can not use the generic count PCIe functions method,
+ * because they are behind a parent switch, so we hardcode these with
+ * the correct number of functions.
*/
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_SFP_SF_QP:
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ if (ixgbe_pcie_from_parent(&adapter->hw)) {
physfns = 4;
- break;
- default:
+ } else {
list_for_each(entry, &adapter->pdev->bus_list) {
struct pci_dev *pdev =
list_entry(entry, struct pci_dev, bus_list);
@@ -7653,7 +7987,8 @@ skip_sriov:
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_L2FW_DOFFLOAD;
netdev->hw_features = netdev->features;
@@ -7759,29 +8094,6 @@ skip_sriov:
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
- /* print bus type/speed/width info */
- e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
-
- err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
- if (err)
- strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
- else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, part_str);
-
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these
@@ -7797,6 +8109,19 @@ skip_sriov:
}
ixgbe_check_minimum_link(adapter, expected_gts);
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ e_dev_info("%pM\n", netdev->dev_addr);
+
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
if (err == IXGBE_ERR_EEPROM_VERSION) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e3719e..aae900a256da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -57,28 +57,28 @@
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
-#define IXGBE_SFF_ADDRESSING_MODE 0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b135332..d6f0c0d8cf11 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -129,10 +129,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
if (!pre_existing_vfs && !adapter->num_vfs)
return;
- if (!pre_existing_vfs)
- dev_warn(&adapter->pdev->dev,
- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
-
/* If there are pre-existing VFs then we have to force
* use of that many - over ride any module parameter value.
* This may result from the user unloading the PF driver
@@ -223,17 +219,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_FLUSH(hw);
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ } else {
+ rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
+ }
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
adapter->ring_feature[RING_F_RSS].limit = rss;
/* take a breather then clean up driver data */
msleep(100);
-
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
return 0;
}
@@ -298,13 +296,10 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
- if (!err && current_flags != adapter->flags) {
- /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
#ifdef CONFIG_PCI_IOV
+ if (!err && current_flags != adapter->flags)
ixgbe_sriov_reinit(adapter);
#endif
- }
return err;
}
@@ -558,7 +553,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
@@ -621,16 +616,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
- unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
- if (enable) {
- eth_zero_addr(vf_mac_addr);
- memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
- }
+ if (enable)
+ eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb9b6d8..7c19e969576f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTDQSEL 0x04904
#define IXGBE_RTTDT1C 0x04908
#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f5929a..24b80a6cfca4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,12 +32,12 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES 128
-#define IXGBE_X540_MC_TBL_SIZE 128
-#define IXGBE_X540_VFT_TBL_SIZE 128
-#define IXGBE_X540_RX_PB_SIZE 384
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12d6f04..54d9acef9c4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+ };
};
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
- offsetof(struct ixgbevf_adapter, m), \
- offsetof(struct ixgbevf_adapter, b), \
- offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+ .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = -1, \
+ .saved_reset_offset = -1 \
+}
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
- zero_base)},
- {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
- zero_base)},
- {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
- zero_base)},
+ {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+ {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+ {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+ {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+ {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+ {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+ {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+ {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+ {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
};
#define IXGBE_QUEUE_STATS_LEN 0
@@ -140,58 +156,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-static char *ixgbevf_reg_names[] = {
- "IXGBE_VFCTRL",
- "IXGBE_VFSTATUS",
- "IXGBE_VFLINKS",
- "IXGBE_VFRXMEMWRAP",
- "IXGBE_VFFRTIMER",
- "IXGBE_VTEICR",
- "IXGBE_VTEICS",
- "IXGBE_VTEIMS",
- "IXGBE_VTEIMC",
- "IXGBE_VTEIAC",
- "IXGBE_VTEIAM",
- "IXGBE_VTEITR",
- "IXGBE_VTIVAR",
- "IXGBE_VTIVAR_MISC",
- "IXGBE_VFRDBAL0",
- "IXGBE_VFRDBAL1",
- "IXGBE_VFRDBAH0",
- "IXGBE_VFRDBAH1",
- "IXGBE_VFRDLEN0",
- "IXGBE_VFRDLEN1",
- "IXGBE_VFRDH0",
- "IXGBE_VFRDH1",
- "IXGBE_VFRDT0",
- "IXGBE_VFRDT1",
- "IXGBE_VFRXDCTL0",
- "IXGBE_VFRXDCTL1",
- "IXGBE_VFSRRCTL0",
- "IXGBE_VFSRRCTL1",
- "IXGBE_VFPSRTYPE",
- "IXGBE_VFTDBAL0",
- "IXGBE_VFTDBAL1",
- "IXGBE_VFTDBAH0",
- "IXGBE_VFTDBAH1",
- "IXGBE_VFTDLEN0",
- "IXGBE_VFTDLEN1",
- "IXGBE_VFTDH0",
- "IXGBE_VFTDH1",
- "IXGBE_VFTDT0",
- "IXGBE_VFTDT1",
- "IXGBE_VFTXDCTL0",
- "IXGBE_VFTXDCTL1",
- "IXGBE_VFTDWBAL0",
- "IXGBE_VFTDWBAL1",
- "IXGBE_VFTDWBAH0",
- "IXGBE_VFTDWBAH1"
-};
-
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
- return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+ return IXGBE_REGS_LEN * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +232,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
- for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
- hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
@@ -441,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ char *base = (char *) adapter;
int i;
+#ifdef BP_EXTENDED_STATS
+ u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+ tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_yields += adapter->rx_ring[i].bp_yields;
+ rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+ rx_missed += adapter->rx_ring[i].bp_misses;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_yields += adapter->tx_ring[i].bp_yields;
+ tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+ tx_missed += adapter->tx_ring[i].bp_misses;
+ }
+
+ adapter->bp_rx_yields = rx_yields;
+ adapter->bp_rx_cleaned = rx_cleaned;
+ adapter->bp_rx_missed = rx_missed;
+
+ adapter->bp_tx_yields = tx_yields;
+ adapter->bp_tx_cleaned = tx_cleaned;
+ adapter->bp_tx_missed = tx_missed;
+#endif
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- ixgbe_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = (char *)adapter +
- ixgbe_gstrings_stats[i].saved_reset_offset;
- data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+ char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+ if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+ else
+ data[i] = *(u64 *)p;
+ } else {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -685,6 +678,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
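+/* report the current interrupt coalescing (ITR) settings in usecs */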
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
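+/* validate the requested coalescing values and write the new ITR to every queue vector */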
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_q_vector *q_vector;
+ int num_vectors, i;
+ u16 tx_itr_param, rx_itr_param;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbevf_write_eitr(q_vector);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
@@ -700,6 +772,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
+ .get_coalesce = ixgbevf_get_coalesce,
+ .set_coalesce = ixgbevf_set_coalesce,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d9867529..8971e2d0a984 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
#include "vf.h"
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
struct u64_stats_sync syncp;
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_yields;
+ u64 bp_misses;
+ u64 bp_cleaned;
+#endif
u16 head;
u16 tail;
@@ -145,7 +155,118 @@ struct ixgbevf_q_vector {
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE 0
+#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+ spin_lock_init(&q_vector->lock);
+ q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+ q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->tx.ring->bp_yields++;
+#endif
+ } else {
+ /* we don't care if someone yielded */
+ q_vector->state = IXGBEVF_QV_STATE_NAPI;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+ IXGBEVF_QV_STATE_NAPI_YIELD));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+ q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->rx.ring->bp_yields++;
+#endif
+ } else {
+ /* preserve yield marks */
+ q_vector->state |= IXGBEVF_QV_STATE_POLL;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+ WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+ return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_OWNED)
+ rc = false;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +286,13 @@ struct ixgbevf_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -240,7 +365,6 @@ struct ixgbevf_adapter {
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbevf_hw_stats stats;
- u64 zero_base;
/* Interrupt Throttle Rate */
u32 eitr_param;
@@ -249,6 +373,16 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_rx_yields;
+ u64 bp_rx_cleaned;
+ u64 bp_rx_missed;
+
+ u64 bp_tx_yields;
+ u64 bp_tx_cleaned;
+ u64 bp_tx_missed;
+#endif
+
u32 link_speed;
bool link_up;
@@ -281,27 +415,25 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bbfb371..038bfc8b7616 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
}
/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ skb_mark_napi_id(skb, &q_vector->napi);
+
+ if (ixgbevf_qv_busy_polling(q_vector)) {
+ netif_receive_skb(skb);
+ /* exit early if we busy polled */
+ return;
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+ ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *rx_ring,
- int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -473,15 +497,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- /*
- * Work around issue of some types of VM to VM loop back
- * packets not getting split correctly
- */
- if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb_headlen(skb);
- if (header_fixup_len < 14)
- skb_push(skb, header_fixup_len);
- }
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
@@ -494,7 +509,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+ ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -514,7 +529,7 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -526,7 +541,7 @@ next_desc:
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- return !!budget;
+ return total_rx_packets;
}
/**
@@ -549,6 +564,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!ixgbevf_qv_lock_napi(q_vector))
+ return budget;
+#endif
+
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
@@ -558,10 +578,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget)
+ < per_ring_budget);
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -580,7 +605,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
* ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information
*/
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +621,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *ring;
+ int found = 0;
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return LL_FLUSH_FAILED;
+
+ if (!ixgbevf_qv_lock_poll(q_vector))
+ return LL_FLUSH_BUSY;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+ if (found)
+ ring->bp_cleaned += found;
+ else
+ ring->bp_misses++;
+#endif
+ if (found)
+ break;
+ }
+
+ ixgbevf_qv_unlock_poll(q_vector);
+
+ return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -756,37 +815,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
- struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 msg;
- bool got_ack = false;
hw->mac.get_link_status = 1;
- if (!hw->mbx.ops.check_for_ack(hw))
- got_ack = true;
-
- if (!hw->mbx.ops.check_for_msg(hw)) {
- hw->mbx.ops.read(hw, &msg, 1);
-
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
- adapter->link_up = false;
- }
- if (msg & IXGBE_VT_MSGTYPE_NACK)
- dev_info(&pdev->dev,
- "Last Request of type %2.2x to PF Nacked\n",
- msg & 0xFF);
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
- }
-
- /* checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it
- */
- if (got_ack)
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1107,6 +1141,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
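+/* set up the VF packet split receive type register (previously just zeroed) */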
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+
+ if (adapter->num_rx_queues > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1154,8 +1203,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
int i, j;
u32 rdlen;
- /* PSRTYPE must be initialized in 82599 */
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
@@ -1293,6 +1341,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
napi_enable(&q_vector->napi);
}
}
@@ -1306,6 +1357,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+ pr_info("QV %d locked\n", q_idx);
+ usleep_range(1000, 20000);
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
@@ -1323,31 +1380,55 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
ixgbevf_alloc_rx_buffers(adapter, ring,
- IXGBE_DESC_UNUSED(ring));
+ ixgbevf_desc_unused(ring));
}
}
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
int j = adapter->rx_ring[rxr].reg_idx;
- int k;
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
- "not set within the polling period\n", rxr);
- }
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+ rxr);
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
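+/* clear RXDCTL.ENABLE for a ring and poll until the hardware reports the queue disabled */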
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
- ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
- adapter->rx_ring[rxr].count - 1);
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1626,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_negotiate_api(adapter);
-
ixgbevf_reset_queues(adapter);
ixgbevf_configure(adapter);
@@ -1679,7 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBEVF_DOWN, &adapter->state);
- /* disable receives */
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1733,10 +1815,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- if (hw->mac.ops.reset_hw(hw))
+ if (hw->mac.ops.reset_hw(hw)) {
hw_dbg(hw, "PF still resetting\n");
- else
+ } else {
hw->mac.ops.init_hw(hw);
+ ixgbevf_negotiate_api(adapter);
+ }
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1929,6 +2013,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_add(&q_vector->napi);
+#endif
adapter->q_vector[q_idx] = q_vector;
}
@@ -1938,6 +2025,9 @@ err_out:
while (q_idx) {
q_idx--;
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
@@ -1961,6 +2051,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -2072,6 +2165,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -2082,6 +2178,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
+ ixgbevf_negotiate_api(adapter);
err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
if (err)
dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2194,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
- /* lock to protect mailbox accesses */
- spin_lock_init(&adapter->mbx_lock);
-
/* Enable dynamic interrupt throttling rates */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -2620,8 +2714,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- ixgbevf_negotiate_api(adapter);
-
/* setup queue reg_idx and Rx queue count */
err = ixgbevf_setup_queues(adapter);
if (err)
@@ -3010,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3021,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbevf_desc_unused(tx_ring) >= size))
return 0;
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
@@ -3216,6 +3308,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
}
pci_set_master(pdev);
+ ixgbevf_reset(adapter);
+
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
rtnl_unlock();
@@ -3224,8 +3318,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
- ixgbevf_reset(adapter);
-
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
@@ -3293,6 +3385,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b52635bc0..4d44d64ae387 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 23de82a9da82..f5685c0d0579 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -309,7 +309,7 @@ static void
jme_load_macaddr(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- unsigned char macaddr[6];
+ unsigned char macaddr[ETH_ALEN];
u32 val;
spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, 6);
+ memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
err_out_unmap:
iounmap(jme->regs);
err_out_free_netdev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
err_out_release_regions:
pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
unregister_netdev(netdev);
iounmap(jme->regs);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 3efc897c9913..58cd67c0c8e4 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -28,7 +28,6 @@
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.8"
-#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index a36fa80968eb..4a5e3b0f712e 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
bif->dev = dev;
- memcpy(dev->dev_addr, bif->mac, 6);
+ memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2c210ec35d59..00cd36e08601 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2513,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mac_addr = of_get_mac_address(pnp);
if (mac_addr)
- memcpy(ppd.mac_addr, mac_addr, 6);
+ memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, 6);
+ memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
else
uc_addr_get(mp, dev->dev_addr);
@@ -2890,6 +2890,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
PHY_INTERFACE_MODE_GMII);
if (!mp->phy)
err = -ENODEV;
+ phy_addr_set(mp, mp->phy->addr);
} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
mp->phy = phy_scan(mp, pd->phy_addr);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index e2f662660313..7354960b583b 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -4,11 +4,9 @@
* Since the MDIO interface of Marvell network interfaces is shared
* between all network interfaces, having a single driver allows to
* handle concurrent accesses properly (you may have four Ethernet
- * ports, but they in fact share the same SMI interface to access the
- * MDIO bus). Moreover, this MDIO interface code is similar between
- * the mv643xx_eth driver and the mvneta driver. For now, it is only
- * used by the mvneta driver, but it could later be used by the
- * mv643xx_eth driver as well.
+ * ports, but they in fact share the same SMI interface to access
+ * the MDIO bus). This driver is currently used by the mvneta and
+ * mv643xx_eth drivers.
*
* Copyright (C) 2012 Marvell
*
@@ -44,6 +42,15 @@
#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
#define MVMDIO_ERR_INT_MASK 0x0080
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
struct orion_mdio_dev {
struct mutex lock;
void __iomem *regs;
@@ -68,77 +75,68 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
-
- if (dev->err_interrupt <= 0) {
- count = 0;
- while (1) {
- if (orion_mdio_smi_is_done(dev))
- break;
-
- if (count > 100) {
- dev_err(bus->parent,
- "Timeout: SMI busy for too long\n");
- return -ETIMEDOUT;
- }
-
- udelay(10);
- count++;
- }
- } else {
- if (!orion_mdio_smi_is_done(dev)) {
+ unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+ unsigned long end = jiffies + timeout;
+ int timedout = 0;
+
+ while (1) {
+ if (orion_mdio_smi_is_done(dev))
+ return 0;
+ else if (timedout)
+ break;
+
+ if (dev->err_interrupt <= 0) {
+ usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
+ MVMDIO_SMI_POLL_INTERVAL_MAX);
+
+ if (time_is_before_jiffies(end))
+ ++timedout;
+ } else {
wait_event_timeout(dev->smi_busy_wait,
- orion_mdio_smi_is_done(dev),
- msecs_to_jiffies(100));
- if (!orion_mdio_smi_is_done(dev))
- return -ETIMEDOUT;
- }
+ orion_mdio_smi_is_done(dev),
+ timeout);
+
+ ++timedout;
+ }
}
- return 0;
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
}
static int orion_mdio_read(struct mii_bus *bus, int mii_id,
int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- int count;
u32 val;
int ret;
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
dev->regs);
- /* Wait for the value to become available */
- count = 0;
- while (1) {
- val = readl(dev->regs);
- if (val & MVMDIO_SMI_READ_VALID)
- break;
-
- if (count > 100) {
- dev_err(bus->parent, "Timeout when reading PHY\n");
- mutex_unlock(&dev->lock);
- return -ETIMEDOUT;
- }
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
- udelay(10);
- count++;
+ val = readl(dev->regs);
+ if (!(val & MVMDIO_SMI_READ_VALID)) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
}
+ ret = val & 0xFFFF;
+out:
mutex_unlock(&dev->lock);
-
- return val & 0xFFFF;
+ return ret;
}
static int orion_mdio_write(struct mii_bus *bus, int mii_id,
@@ -150,10 +148,8 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
mutex_lock(&dev->lock);
ret = orion_mdio_wait_ready(bus);
- if (ret < 0) {
- mutex_unlock(&dev->lock);
- return ret;
- }
+ if (ret < 0)
+ goto out;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
@@ -161,9 +157,9 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(value << MVMDIO_SMI_DATA_SHIFT)),
dev->regs);
+out:
mutex_unlock(&dev->lock);
-
- return 0;
+ return ret;
}
static int orion_mdio_reset(struct mii_bus *bus)
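
A minimal sketch of the deadline-based wait loop that the mvmdio hunks above switch to, covering only the polled branch; check_ready() is a hypothetical callback standing in for orion_mdio_smi_is_done(). The notable point of the structure is that the hardware gets one last check after the deadline expires, so a completion racing with the timeout is not misreported as -ETIMEDOUT.

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_ready_polled(bool (*check_ready)(void *ctx), void *ctx)
{
	unsigned long end = jiffies + usecs_to_jiffies(1000);	/* ~1 ms budget */
	bool timedout = false;

	while (1) {
		if (check_ready(ctx))
			return 0;	/* ready, possibly on the post-timeout check */
		if (timedout)
			break;		/* already had the final look */

		usleep_range(45, 55);	/* matches the measured SMI turnaround */

		if (time_is_before_jiffies(end))
			timedout = true;	/* loop once more for a last check */
	}

	return -ETIMEDOUT;
}
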
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e35bac7cfdf1..7d99e695a110 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2811,7 +2811,7 @@ static int mvneta_probe(struct platform_device *pdev)
}
dt_mac_addr = of_get_mac_address(dn);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+ if (dt_mac_addr) {
mac_from = "device tree";
memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
} else {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ecc7f7b696b8..597846193869 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4046,7 +4046,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e09a8c6f8536..a7df981d2123 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5081,7 +5081,6 @@ err_out_free_regions:
err_out_disable:
pci_disable_device(pdev);
err_out:
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -5124,8 +5123,6 @@ static void sky2_remove(struct pci_dev *pdev)
iounmap(hw->regs);
kfree(hw);
-
- pci_set_drvdata(pdev, NULL);
}
static int sky2_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index bb11624a1f39..1e9970d2f0f3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1539,11 +1539,6 @@ out:
return ret;
}
-static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
-{
- return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
-}
-
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
@@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
struct mlx4_dev *dev = &(priv->dev);
int err;
int admin_vlan_ix = NO_INDX;
- enum mlx4_vlan_transition vlan_trans;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
@@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.link_state == vp_admin->link_state)
return 0;
- vlan_trans = calculate_transition(vp_oper->state.default_vlan,
- vp_admin->default_vlan);
-
if (!(priv->mfunc.master.slave_state[slave].active &&
- dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP &&
- vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) {
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
/* even if the UPDATE_QP command isn't supported, we still want
* to set this VF link according to the admin directive
*/
@@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
- err = __mlx4_register_vlan(&priv->dev, port,
- vp_admin->default_vlan,
- &admin_vlan_ix);
- if (err) {
- kfree(work);
- mlx4_warn((&priv->dev),
- "No vlan resources slave %d, port %d\n",
- slave, port);
- return err;
+ if (MLX4_VGT != vp_admin->default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &admin_vlan_ix);
+ if (err) {
+ kfree(work);
+ mlx4_warn((&priv->dev),
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ } else {
+ admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg((&(priv->dev)),
@@ -1687,7 +1681,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
- port, vp_oper->vlan_idx);
+ port, vp_oper->state.default_vlan);
vp_oper->vlan_idx = NO_INDX;
}
if (NO_INDX != vp_oper->mac_idx) {
@@ -1718,6 +1712,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
+ slave_state[slave].old_vlan_api = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2198,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
return ERR_PTR(-ENOMEM);
}
+ memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
@@ -2253,7 +2250,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_vport_oper_state *vf_oper;
struct mlx4_vport_state *vf_admin;
int slave;
@@ -2269,7 +2265,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
return -EINVAL;
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if ((0 == vlan) && (0 == qos))
vf_admin->default_vlan = MLX4_VGT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 004e4231af67..22fcbe78311c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->cq_max_count = cpu_to_be16(count);
cq_context->cq_period = cpu_to_be16(period);
@@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
cq_context->log_page_size = mtt->page_shift - 12;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
}
cq_context = mailbox->buf;
- memset(cq_context, 0, sizeof *cq_context);
-
cq_context->flags = cpu_to_be32(!!collapsed << 18);
if (timestamp_en)
cq_context->flags |= cpu_to_be32(1 << 19);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb3..3a098cc4d349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
- struct mlx4_en_cq *cq,
- int entries, int ring, enum cq_type mode)
+ struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode,
+ int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
int err;
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+ if (!cq) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
+ }
+ }
+
cq->size = entries;
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
@@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->is_tx = mode;
spin_lock_init(&cq->lock);
+ /* Allocate HW buffers on provided NUMA node.
+ * dev->numa_node is used in mtt range allocation flow.
+ */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
- return err;
+ goto err_cq;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- else
- cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+ goto err_res;
+ cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+ *pcq = cq;
+
+ return 0;
+
+err_res:
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+ kfree(cq);
+ *pcq = NULL;
return err;
}
@@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_cq *rx_cq;
cq_idx = cq_idx % priv->rx_ring_num;
- rx_cq = &priv->rx_cq[cq_idx];
+ rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
if (!cq->is_tx)
- cq->size = priv->rx_ring[cq->ring].actual_size;
+ cq->size = priv->rx_ring[cq->ring]->actual_size;
if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
(!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
return 0;
}
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq = *pcq;
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
+ kfree(cq);
+ *pcq = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
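
The en_cq.c, en_rx.c, en_tx.c and icm.c hunks in this series all use the same allocation idiom; a small sketch of it follows, with struct demo_ring as a placeholder for the real per-ring structures: prefer memory on the NUMA node that will service the ring, but fall back to any node rather than fail, since locality is an optimisation and not a correctness requirement.

#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

struct demo_ring {
	u32 size;
	/* ... descriptor and buffer bookkeeping ... */
};

static struct demo_ring *demo_ring_alloc(int ring_index)
{
	int node = cpu_to_node(ring_index % num_online_cpus());
	struct demo_ring *ring;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		/* Node-local memory is exhausted; take whatever is available. */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			return NULL;
	}
	return ring;
}
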
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c750985f47e..0596f9f85a0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
int err = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i].moder_cnt = priv->tx_frames;
- priv->tx_cq[i].moder_time = priv->tx_usecs;
+ priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[i]->moder_time = priv->tx_usecs;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
if (err)
return err;
}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i].moder_cnt = priv->rx_frames;
- priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+ priv->rx_cq[i]->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
if (err)
return err;
}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
}
}
for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i].packets;
- data[index++] = priv->tx_ring[i].bytes;
+ data[index++] = priv->tx_ring[i]->packets;
+ data[index++] = priv->tx_ring[i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- data[index++] = priv->rx_ring[i].packets;
- data[index++] = priv->rx_ring[i].bytes;
+ data[index++] = priv->rx_ring[i]->packets;
+ data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
- data[index++] = priv->rx_ring[i].yields;
- data[index++] = priv->rx_ring[i].misses;
- data[index++] = priv->rx_ring[i].cleaned;
+ data[index++] = priv->rx_ring[i]->yields;
+ data[index++] = priv->rx_ring[i]->misses;
+ data[index++] = priv->rx_ring[i]->cleaned;
#endif
}
spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
- if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
- priv->rx_ring[0].size) &&
- tx_size == priv->tx_ring[0].size)
+ if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+ priv->rx_ring[0]->size) &&
+ tx_size == priv->tx_ring[0]->size)
return 0;
mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
- priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
- param->tx_pending = priv->tx_ring[0].size;
+ priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a071cda2dd04..0d087b03a7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
+ /* Initialize time stamp mechanism */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_init_timestamp(mdev);
+
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
if (!dev->caps.comp_pool) {
mdev->profile.prof[i].rx_ring_num =
@@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->pndev[i] = NULL;
}
- /* Initialize time stamp mechanism */
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_init_timestamp(mdev);
-
return mdev;
err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fa37b7a61213..e72d8a112a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
int done;
if (!priv->port_up)
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ return MLX4_NET_TRANS_RULE_ID_UDP;
+ case IPPROTO_TCP:
+ return MLX4_NET_TRANS_RULE_ID_TCP;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+};
+
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
- struct mlx4_spec_list spec_tcp = {
- .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ struct mlx4_spec_list spec_tcp_udp = {
+ .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
{
.tcp_udp = {
.dst_port = filter->dst_port,
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+ if (spec_tcp_udp.id < 0) {
+ en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+ filter->ip_proto);
+ goto ignore;
+ }
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
- list_add_tail(&spec_tcp.list, &rule.list);
+ list_add_tail(&spec_tcp_udp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
+ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
- __be32 dst_ip, __be16 src_port, __be16 dst_port,
- u32 flow_id)
+ __be32 dst_ip, u8 ip_proto, __be16 src_port,
+ __be16 dst_port, u32 flow_id)
{
struct mlx4_en_filter *filter = NULL;
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
+ filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
- __be16 src_port, __be16 dst_port)
+ u8 ip_proto, __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
+ filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
+ u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
+ if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
- if (ip->protocol != IPPROTO_TCP)
- return -EPROTONOSUPPORT;
-
spin_lock_bh(&priv->filters_lock);
- filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+ src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
- src_ip, dst_ip,
+ src_ip, dst_ip, ip_proto,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
@@ -332,8 +355,7 @@ err:
return ret;
}
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
@@ -417,7 +439,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- int idx;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -425,10 +446,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
- if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
- mlx4_unregister_vlan(mdev->dev, priv->port, idx);
- else
- en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
+ mlx4_unregister_vlan(mdev->dev, priv->port, vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
@@ -1223,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
spin_lock_irqsave(&cq->lock, flags);
napi_synchronize(&cq->napi);
mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1245,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
- priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+ i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+ priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
}
priv->port_stats.tx_timeout++;
@@ -1286,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1295,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
}
for (i = 0; i < priv->tx_ring_num; i++) {
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
@@ -1329,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
for (ring = 0; ring < priv->rx_ring_num; ring++) {
spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring].packets;
- rx_bytes = priv->rx_ring[ring].bytes;
+ rx_packets = priv->rx_ring[ring]->packets;
+ rx_bytes = priv->rx_ring[ring]->bytes;
spin_unlock_bh(&priv->stats_lock);
rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1359,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
if (moder_time != priv->last_moder_time[ring]) {
priv->last_moder_time[ring] = moder_time;
- cq = &priv->rx_cq[ring];
+ cq = priv->rx_cq[ring];
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
@@ -1482,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- cq = &priv->rx_cq[i];
+ cq = priv->rx_cq[i];
mlx4_en_cq_init_lock(cq);
@@ -1500,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
- priv->rx_ring[i].cqn = cq->mcq.cqn;
+ priv->rx_ring[i]->cqn = cq->mcq.cqn;
++rx_index;
}
@@ -1526,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
- cq = &priv->tx_cq[i];
+ cq = priv->tx_cq[i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
@@ -1542,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
i / priv->num_tx_rings_p_up);
if (err) {
@@ -1612,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
tx_err:
while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
@@ -1622,9 +1640,9 @@ mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
- mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+ mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
for (i = 0; i < priv->rx_ring_num; i++)
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
return err; /* need to close devices */
}
@@ -1720,25 +1738,25 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
}
msleep(10);
for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+ mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
mlx4_en_put_qp(priv);
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
- struct mlx4_en_cq *cq = &priv->rx_cq[i];
+ struct mlx4_en_cq *cq = priv->rx_cq[i];
local_bh_disable();
while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1749,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
msleep(1);
- mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
}
}
@@ -1787,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i].bytes = 0;
- priv->tx_ring[i].packets = 0;
- priv->tx_ring[i].tx_csum = 0;
+ priv->tx_ring[i]->bytes = 0;
+ priv->tx_ring[i]->packets = 0;
+ priv->tx_ring[i]->tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_ring[i].bytes = 0;
- priv->rx_ring[i].packets = 0;
- priv->rx_ring[i].csum_ok = 0;
- priv->rx_ring[i].csum_none = 0;
+ priv->rx_ring[i]->bytes = 0;
+ priv->rx_ring[i]->packets = 0;
+ priv->rx_ring[i]->csum_ok = 0;
+ priv->rx_ring[i]->csum_none = 0;
}
}
@@ -1852,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
#endif
for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i].tx_info)
+ if (priv->tx_ring && priv->tx_ring[i])
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i].buf)
+ if (priv->tx_cq && priv->tx_cq[i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- if (priv->rx_ring[i].rx_info)
+ if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
- if (priv->rx_cq[i].buf)
+ if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -1877,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
struct mlx4_en_port_profile *prof = priv->prof;
int i;
int err;
+ int node;
err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
@@ -1886,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX))
+ prof->tx_ring_size, i, TX, node))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
- prof->tx_ring_size, TXBB_SIZE))
+ prof->tx_ring_size, TXBB_SIZE, node))
goto err;
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
+ node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
- prof->rx_ring_size, i, RX))
+ prof->rx_ring_size, i, RX, node))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
- prof->rx_ring_size, priv->stride))
+ prof->rx_ring_size, priv->stride,
+ node))
goto err;
}
@@ -1918,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i])
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size,
+ priv->stride);
+ if (priv->rx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i])
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ }
return -ENOMEM;
}
@@ -2211,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
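
This hunk is why so many mlx4_en hunks change priv->rx_ring[i].field to priv->rx_ring[i]->field: the per-port ring and CQ arrays become arrays of pointers, with the table allocated up front and each ring allocated separately on its own NUMA node (as in the sketch after the en_cq.c hunks). A compressed sketch with placeholder names, not the driver's real ones:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_RINGS 128

struct demo_ring {
	u64 packets;
	u64 bytes;
};

struct demo_priv {
	struct demo_ring **rx_ring;	/* was: an array of struct demo_ring */
	int rx_ring_num;
};

static int demo_alloc_ring_table(struct demo_priv *priv)
{
	/* Allocate only the table of pointers here; the rings themselves are
	 * allocated one by one with kzalloc_node(), so every access becomes
	 * rx_ring[i]->field instead of rx_ring[i].field.
	 */
	priv->rx_ring = kzalloc(sizeof(struct demo_ring *) * DEMO_MAX_RINGS,
				GFP_KERNEL);
	return priv->rx_ring ? 0 : -ENOMEM;
}
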
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 331791467a22..dae1a1f4ae55 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
return PTR_ERR(mailbox);
filter = mailbox->buf;
- memset(filter, 0, sizeof(*filter));
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
@@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
@@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
@@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- stats->rx_packets += priv->rx_ring[i].packets;
- stats->rx_bytes += priv->rx_ring[i].bytes;
- priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
- priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+ stats->rx_packets += priv->rx_ring[i]->packets;
+ stats->rx_bytes += priv->rx_ring[i]->bytes;
+ priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+ priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
- stats->tx_packets += priv->tx_ring[i].packets;
- stats->tx_bytes += priv->tx_ring[i].bytes;
- priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+ stats->tx_packets += priv->tx_ring[i]->packets;
+ stats->tx_bytes += priv->tx_ring[i]->bytes;
+ priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa69c86..07a1d0fbae47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
@@ -319,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+ if (!ring) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
+ }
+ }
+
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -335,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc(tmp);
- if (!ring->rx_info)
- return -ENOMEM;
+ ring->rx_info = vmalloc_node(tmp, node);
+ if (!ring->rx_info) {
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
+ }
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err)
- goto err_ring;
+ goto err_info;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
@@ -356,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
+ *pring = ring;
return 0;
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
+
return err;
}
@@ -376,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
- ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+ ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE)
@@ -412,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
- ring = &priv->rx_ring[ring_ind];
+ ring = priv->rx_ring[ring_ind];
ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
- mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+ mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
- if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
- priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+ mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring = *pring;
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv, ring);
+ mlx4_en_cleanup_filters(priv);
#endif
}
@@ -592,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
- struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -991,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
- err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+ err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
@@ -1008,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
- priv->rx_ring[0].cqn, -1, &context);
+ priv->rx_ring[0]->cqn, -1, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d669e6..40626690e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
* since we turned the carrier off */
msleep(200);
for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
- tx_ring = &priv->tx_ring[i];
+ tx_ring = priv->tx_ring[i];
if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
goto retry_tx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82d6ff1..f54ebd5a1702 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring, int qpn, u32 size,
- u16 stride)
+ struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+ u16 stride, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
int tmp;
int err;
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
+ if (!ring) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
+ }
+ }
+
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
@@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
inline_thold = min(inline_thold, MAX_INLINE);
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info)
- return -ENOMEM;
+ ring->tx_info = vmalloc_node(tmp, node);
+ if (!ring->tx_info) {
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ err = -ENOMEM;
+ goto err_ring;
+ }
+ }
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
if (!ring->bounce_buf) {
- err = -ENOMEM;
- goto err_tx;
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ err = -ENOMEM;
+ goto err_info;
+ }
}
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+ /* Allocate HW buffers on provided NUMA node */
+ set_dev_node(&mdev->dev->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
2 * PAGE_SIZE);
+ set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
@@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
}
ring->qp.event = mlx4_en_sqp_event;
- err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+ err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
en_dbg(DRV, priv, "working without blueflame (%d)", err);
ring->bf.uar = &mdev->priv_uar;
@@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ *pring = ring;
return 0;
err_map:
@@ -129,16 +151,20 @@ err_hwq_res:
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
-err_tx:
+err_info:
vfree(ring->tx_info);
ring->tx_info = NULL;
+err_ring:
+ kfree(ring);
+ *pring = NULL;
return err;
}
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring)
+ struct mlx4_en_tx_ring **pring)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring = *pring;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
if (ring->bf_enabled)
@@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
ring->bounce_buf = NULL;
vfree(ring->tx_info);
ring->tx_info = NULL;
+ kfree(ring);
+ *pring = NULL;
}
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
- struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
u16 new_index, ring_index, stamp_index;
@@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_ind = skb->queue_mapping;
- ring = &priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[tx_ind];
if (vlan_tx_tag_present(skb))
vlan_tag = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0416c5b3b35c..c9cdb2a2c596 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
if (err)
goto err_out_free_mtt;
- memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
MLX4_EQ_STATE_ARMED);
eq_context->log_eq_size = ilog2(eq->nent);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 0d63daa2f422..fda26679f7d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
@@ -177,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
u8 field;
u32 size;
int err = 0;
@@ -185,18 +184,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
-#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
-#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
-#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
-#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
-#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
-#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
+
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
+#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -237,8 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
} else if (vhcr->op_modifier == 0) {
- /* enable rdma and ethernet interfaces */
- field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
+ /* enable rdma and ethernet interfaces, and new quota locations */
+ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
+ QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
@@ -250,14 +258,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
- size = dev->caps.num_qps;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
- size = dev->caps.num_srqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
- size = dev->caps.num_cqs;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@@ -265,14 +279,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
- size = dev->caps.num_mpts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
- size = dev->caps.num_mtts;
+ size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
} else
err = -EINVAL;
@@ -287,7 +306,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u32 *outbox;
u8 field, op_modifier;
u32 size;
- int err = 0;
+ int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
@@ -311,6 +330,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
func_cap->flags = field;
+ quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@@ -318,29 +338,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
- func_cap->qp_quota = size & 0xFFFFFF;
+ if (quotas) {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
- func_cap->srq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
- func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ } else {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+ }
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
- func_cap->mpt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
- func_cap->mtt_quota = size & 0xFFFFFF;
-
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
- func_cap->mcg_quota = size & 0xFFFFFF;
goto out;
}
@@ -652,7 +693,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
- dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -924,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
pages = mailbox->buf;
for (mlx4_icm_first(icm, &iter);
@@ -1273,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_HCA_IN_SIZE);
-
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
@@ -1573,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
return PTR_ERR(mailbox);
inbox = mailbox->buf;
- memset(inbox, 0, INIT_PORT_IN_SIZE);
-
flags = 0;
flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
@@ -1713,7 +1749,6 @@ void mlx4_opreq_action(struct work_struct *work)
u32 *outbox;
u32 modifier;
u16 token;
- u16 type_m;
u16 type;
int err;
u32 num_qps;
@@ -1746,7 +1781,6 @@ void mlx4_opreq_action(struct work_struct *work)
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
- type_m = type >> 12;
type &= 0xfff;
switch (type) {
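
A sketch of the quota-read fallback added to mlx4_QUERY_FUNC_CAP() above: when the PF advertises QUERY_FUNC_CAP_FLAG_QUOTAS (0x10), per-function quotas are read from the new offsets (0x50..0x68); otherwise the deprecated 0x10..0x28 locations are used. get_field() below is a hypothetical stand-in for the driver's MLX4_GET() mailbox accessor, shown here only for the QP quota.

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_FLAG_QUOTAS		0x10
#define DEMO_QP_QUOTA_OFFSET		0x50
#define DEMO_QP_QUOTA_OFFSET_DEP	0x10

static u32 get_field(const void *outbox, int offset)
{
	/* The command mailbox holds big-endian dwords. */
	return be32_to_cpu(*(const __be32 *)((const u8 *)outbox + offset));
}

static u32 demo_read_qp_quota(const void *outbox, u8 flags)
{
	int offset = (flags & DEMO_FLAG_QUOTAS) ?
		DEMO_QP_QUOTA_OFFSET : DEMO_QP_QUOTA_OFFSET_DEP;

	return get_field(outbox, offset) & 0xFFFFFF;	/* quota is the low 24 bits */
}
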
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 31d02649be41..5fbf4924c272 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
kfree(icm);
}
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+ gfp_t gfp_mask, int node)
{
struct page *page;
- page = alloc_pages(gfp_mask, order);
- if (!page)
- return -ENOMEM;
+ page = alloc_pages_node(node, gfp_mask, order);
+ if (!page) {
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return -ENOMEM;
+ }
sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
/* We use sg_set_buf for coherent allocs, which assumes low memory */
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
- icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!icm)
- return NULL;
+ icm = kmalloc_node(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+ dev->numa_node);
+ if (!icm) {
+ icm = kmalloc(sizeof(*icm),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!icm)
+ return NULL;
+ }
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc(sizeof *chunk,
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!chunk)
- goto fail;
+ chunk = kmalloc_node(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN),
+ dev->numa_node);
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM |
+ __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+ }
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
cur_order, gfp_mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
- cur_order, gfp_mask);
+ cur_order, gfp_mask,
+ dev->numa_node);
if (ret) {
if (--cur_order < 0)
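
The icm.c hunks above switch the ICM page and metadata allocations to prefer the device's NUMA node (dev->numa_node) and fall back to an ordinary allocation when the node-local attempt fails. The fragment below is a minimal userspace sketch of that try-preferred-then-fall-back pattern; try_node_alloc() is a hypothetical stand-in for alloc_pages_node()/kmalloc_node(), not a driver function.

/* Sketch only: prefer a node-local allocation, fall back to a generic one,
 * and fail only if both attempts fail. */
#include <stdio.h>
#include <stdlib.h>

static void *try_node_alloc(size_t size, int node)
{
	/* Pretend the node-local allocation fails for invalid nodes. */
	return node >= 0 ? malloc(size) : NULL;
}

static void *alloc_prefer_node(size_t size, int node)
{
	void *p = try_node_alloc(size, node);	/* preferred: node-local */

	if (!p)
		p = malloc(size);		/* fallback: any memory */
	return p;
}

int main(void)
{
	void *buf = alloc_prefer_node(4096, -1);	/* forces the fallback */

	printf("allocation %s\n", buf ? "succeeded via fallback" : "failed");
	free(buf);
	return 0;
}
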
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60c9f4f103fc..5789ea2c934d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/kmod.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -561,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.num_ports = func_cap.num_ports;
- dev->caps.num_qps = func_cap.qp_quota;
- dev->caps.num_srqs = func_cap.srq_quota;
- dev->caps.num_cqs = func_cap.cq_quota;
- dev->caps.num_eqs = func_cap.max_eq;
- dev->caps.reserved_eqs = func_cap.reserved_eq;
- dev->caps.num_mpts = func_cap.mpt_quota;
- dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->quotas.qp = func_cap.qp_quota;
+ dev->quotas.srq = func_cap.srq_quota;
+ dev->quotas.cq = func_cap.cq_quota;
+ dev->quotas.mpt = func_cap.mpt_quota;
+ dev->quotas.mtt = func_cap.mtt_quota;
+ dev->caps.num_qps = 1 << hca_param.log_num_qps;
+ dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
+ dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
+ dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
@@ -650,6 +655,27 @@ err_mem:
return err;
}
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+ int port;
+	bool has_ib_port = false;
+	bool has_eth_port = false;
+#define EN_DRV_NAME "mlx4_en"
+#define IB_DRV_NAME "mlx4_ib"
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ has_ib_port = true;
+ else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ has_eth_port = true;
+ }
+
+ if (has_ib_port)
+ request_module_nowait(IB_DRV_NAME);
+ if (has_eth_port)
+ request_module_nowait(EN_DRV_NAME);
+}
+
/*
* Change the port configuration of the device.
* Every user of this function must hold the port mutex.
@@ -681,6 +707,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
}
mlx4_set_port_mask(dev);
err = mlx4_register_device(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to register device\n");
+ goto out;
+ }
+ mlx4_request_modules(dev);
}
out:
@@ -2075,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
"aborting.\n");
return err;
}
- if (num_vfs > MLX4_MAX_NUM_VF) {
- printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
- num_vfs, MLX4_MAX_NUM_VF);
+
+	/* Due to the requirement that all VFs and the PF are *guaranteed* two
+	 * MACs per port, we must limit the number of VFs to 63 (since there
+	 * are only 128 MACs available)
+ */
+ if (num_vfs >= MLX4_MAX_NUM_VF) {
+ dev_err(&pdev->dev,
+			"Requested more VFs (%d) than allowed (%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL;
}
@@ -2154,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ dev->numa_node = dev_to_node(&pdev->dev);
/* Detect if this device is a virtual function */
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
@@ -2295,6 +2333,8 @@ slave_start:
if (err)
goto err_steer;
+ mlx4_init_quotas(dev);
+
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
if (err)
@@ -2305,6 +2345,8 @@ slave_start:
if (err)
goto err_port;
+ mlx4_request_modules(dev);
+
mlx4_sense_init(dev);
mlx4_start_sense(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 55f6245efb6c..acf9d5f1f922 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
goto out_list;
}
mgm = mailbox->buf;
- memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
@@ -645,7 +644,7 @@ static const u8 __promisc_mode[] = {
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type)
{
- if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+ if (flow_type >= MLX4_FS_MODE_NUM) {
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
return -EINVAL;
}
@@ -681,7 +680,7 @@ const u16 __sw_id_hw[] = {
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
@@ -706,7 +705,7 @@ static const int __rule_hw_sz[] = {
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
- if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+ if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
@@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 348bb8c7d9a7..e582a41a802b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -455,6 +455,7 @@ struct mlx4_slave_state {
u8 last_cmd;
u8 init_port_mask;
bool active;
+ bool old_vlan_api;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -503,12 +504,28 @@ struct slave_list {
struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};
+struct resource_allocator {
+ spinlock_t alloc_lock; /* protect quotas */
+ union {
+ int res_reserved;
+ int res_port_rsvd[MLX4_MAX_PORTS];
+ };
+ union {
+ int res_free;
+ int res_port_free[MLX4_MAX_PORTS];
+ };
+ int *quota;
+ int *allocated;
+ int *guaranteed;
+};
+
struct mlx4_resource_tracker {
spinlock_t lock;
	/* one tree per resource type */
struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
+ struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};
#define SLAVE_EVENT_EQ_SIZE 128
@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
+void mlx4_init_quotas(struct mlx4_dev *dev);
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e3610d27..f3758de59c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
u16 num_frags;
u16 log_rx_info;
- struct mlx4_en_tx_ring *tx_ring;
- struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq *tx_cq;
- struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
if ((cq->state & MLX4_CQ_LOCKED)) {
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
- int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring,
+ int qpn, u32 size, u16 stride, int node);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- u32 size, u16 stride);
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST 5
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index f91719a08cba..b3ee9bafff5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
goto err_table;
}
mpt_entry = mailbox->buf;
-
- memset(mpt_entry, 0, sizeof *mpt_entry);
-
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
@@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
}
mpt_entry = mailbox->buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
* off, thus creating a memory window and not a memory region.
*/
@@ -755,14 +750,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
- if (!is_power_of_2(dev->caps.num_mpts))
- return -EINVAL;
-
/* Nothing to do for slaves - all MR handling is forwarded
* to the master */
if (mlx4_is_slave(dev))
return 0;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223acada7..84cfb40bf451 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
err = -ENOMEM;
goto out;
}
- uar = kmalloc(sizeof *uar, GFP_KERNEL);
+ uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
if (!uar) {
- err = -ENOMEM;
- goto out;
+ uar = kmalloc(sizeof(*uar), GFP_KERNEL);
+ if (!uar) {
+ err = -ENOMEM;
+ goto out;
+ }
}
err = mlx4_uar_alloc(dev, uar);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 946e0af5faef..97d342fa5032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
- int err;
+ int err = -EINVAL;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ err = mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
+ if (err && err == -EINVAL && mlx4_is_slave(dev)) {
+ /* retry using old REG_MAC format */
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ dev->flags |= MLX4_FLAG_OLD_REG_MAC;
+ }
if (err)
return err;
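
With the new RES_MAC/RES_VLAN command format used above, the port number rides in bits 8-15 of the in_modifier while the resource type stays in bits 0-7, which is exactly what the ALLOC_RES/FREE_RES wrappers later recover with "& 0xFF" and ">> 8". mlx4_register_mac() first tries this new format and, if the master rejects it with -EINVAL, retries the old format and remembers that via MLX4_FLAG_OLD_REG_MAC. A small standalone check of the bit layout; the RES_MAC value below is illustrative, not the driver's enum value.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RES_MAC 3	/* illustrative value; the real enum lives in the mlx4 headers */

int main(void)
{
	uint8_t port = 2;
	uint32_t in_modifier = ((uint32_t)port << 8) | (uint32_t)RES_MAC;

	assert((in_modifier & 0xFF) == RES_MAC);	/* resource type, bits 0-7 */
	assert(((in_modifier >> 8) & 0xFF) == port);	/* port, bits 8-15 */
	printf("in_modifier = 0x%x -> type %u, port %u\n",
	       in_modifier, in_modifier & 0xFF, (in_modifier >> 8) & 0xFF);
	return 0;
}
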
@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
- RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
+ (void) mlx4_cmd_imm(dev, mac, &out_param,
+ ((u32) port) << 8 | (u32) RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ } else {
+ /* use old unregister mac format */
+ set_param_l(&out_param, port);
+ (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ }
return;
}
__mlx4_unregister_mac(dev, port, mac);
@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
u64 out_param = 0;
int err;
+ if (vlan > 4095)
+ return -EINVAL;
+
if (mlx4_is_mfunc(dev)) {
- set_param_l(&out_param, port);
- err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ err = mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int index;
- if (index < MLX4_VLAN_REGULAR) {
- mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
- return;
+ mutex_lock(&table->mutex);
+ if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
+ mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
+ goto out;
}
- mutex_lock(&table->mutex);
- if (!table->refs[index]) {
- mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
goto out;
}
+
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify vlan table\n", index);
+		mlx4_dbg(dev, "Have %d more references for index %d, "
+			 "no need to modify vlan table\n", table->refs[index],
+ index);
goto out;
}
table->entries[index] = 0;
@@ -410,23 +435,19 @@ out:
mutex_unlock(&table->mutex);
}
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
- u64 in_param = 0;
- int err;
+ u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
- set_param_l(&in_param, port);
- err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
- MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
- if (!err)
- mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
- index);
-
+ (void) mlx4_cmd_imm(dev, vlan, &out_param,
+ ((u32) port) << 8 | (u32) RES_VLAN,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
return;
}
- __mlx4_unregister_vlan(dev, port, index);
+ __mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
@@ -448,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
inbuf = inmailbox->buf;
outbuf = outmailbox->buf;
- memset(inbuf, 0, 256);
- memset(outbuf, 0, 256);
inbuf[0] = 1;
inbuf[1] = 1;
inbuf[2] = 1;
@@ -632,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memset(mailbox->buf, 0, 256);
-
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
@@ -671,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
@@ -706,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
context->base_qpn = cpu_to_be32(base_qpn);
context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
@@ -740,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
for (i = 0; i < MLX4_NUM_UP; i += 2)
context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
@@ -767,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- memset(context, 0, sizeof *context);
for (i = 0; i < MLX4_NUM_TC; i++) {
struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index e891b058c1be..2715e61dbb74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
*/
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
- 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
+ (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
reserved_from_top);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd6876321116..2f3f2bc7f283 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -55,6 +55,14 @@ struct mac_res {
u8 port;
};
+struct vlan_res {
+ struct list_head list;
+ u16 vlan;
+ int ref_count;
+ int vlan_index;
+ u8 port;
+};
+
struct res_common {
struct list_head list;
struct rb_node node;
@@ -102,7 +110,14 @@ struct res_qp {
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
+ /* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
+ __be32 param3;
+ u8 vlan_control;
+ u8 fvl_rx;
+ u8 pri_path_fl;
+ u8 vlan_index;
+ u8 feup;
};
enum res_mtt_states {
@@ -266,6 +281,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
+ case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
@@ -274,10 +290,139 @@ static const char *ResourceType(enum mlx4_resource rt)
};
}
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
+static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int err = -EINVAL;
+ int allocated, free, reserved, guaranteed, from_free;
+
+ if (slave > dev->num_vfs)
+ return -EINVAL;
+
+ spin_lock(&res_alloc->alloc_lock);
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ free = (port > 0) ? res_alloc->res_port_free[port - 1] :
+ res_alloc->res_free;
+ reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
+ res_alloc->res_reserved;
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated + count > res_alloc->quota[slave])
+ goto out;
+
+ if (allocated + count <= guaranteed) {
+ err = 0;
+ } else {
+ /* portion may need to be obtained from free area */
+ if (guaranteed - allocated > 0)
+ from_free = count - (guaranteed - allocated);
+ else
+ from_free = count;
+
+ if (free - from_free > reserved)
+ err = 0;
+ }
+
+ if (!err) {
+ /* grant the request */
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+ res_alloc->res_port_free[port - 1] -= count;
+ } else {
+ res_alloc->allocated[slave] += count;
+ res_alloc->res_free -= count;
+ }
+ }
+
+out:
+ spin_unlock(&res_alloc->alloc_lock);
+ return err;
+}
+
+static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource res_type, int count,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[res_type];
+
+ if (slave > dev->num_vfs)
+ return;
+
+ spin_lock(&res_alloc->alloc_lock);
+ if (port > 0) {
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+ res_alloc->res_port_free[port - 1] += count;
+ } else {
+ res_alloc->allocated[slave] -= count;
+ res_alloc->res_free += count;
+ }
+
+ spin_unlock(&res_alloc->alloc_lock);
+ return;
+}
+
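
mlx4_grant_resource()/mlx4_release_resource() above implement a simple admission check: a function may always allocate up to its guarantee, may go beyond it only while the shared free pool stays above the total kept back for everyone's guarantees, and can never exceed its quota. Below is a standalone sketch of that bookkeeping with illustrative numbers; the field names are simplified and are not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct alloc_state {
	int quota;	/* hard per-function cap */
	int guaranteed;	/* share this function can always get */
	int allocated;	/* currently held by this function */
	int free;	/* shared free pool */
	int reserved;	/* total kept back to cover all guarantees */
};

static bool grant(struct alloc_state *s, int count)
{
	int from_free;

	if (s->allocated + count > s->quota)
		return false;			/* over quota */
	if (s->allocated + count > s->guaranteed) {
		/* part (or all) of the request must come from the free pool */
		from_free = count;
		if (s->guaranteed > s->allocated)
			from_free -= s->guaranteed - s->allocated;
		if (s->free - from_free <= s->reserved)
			return false;		/* would eat into other guarantees */
	}
	s->allocated += count;
	s->free -= count;
	return true;
}

static void release(struct alloc_state *s, int count)
{
	s->allocated -= count;
	s->free += count;
}

int main(void)
{
	struct alloc_state s = { .quota = 128, .guaranteed = 16,
				 .allocated = 10, .free = 200, .reserved = 120 };

	printf("grant 8:  %s\n", grant(&s, 8) ? "granted" : "denied");
	printf("grant 90: %s\n", grant(&s, 90) ? "granted" : "denied");
	release(&s, 8);
	return 0;
}
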
+static inline void initialize_res_quotas(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ enum mlx4_resource res_type,
+ int vf, int num_instances)
+{
+ res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+ res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
+ if (vf == mlx4_master_func_num(dev)) {
+ res_alloc->res_free = num_instances;
+ if (res_type == RES_MTT) {
+ /* reserved mtts will be taken out of the PF allocation */
+ res_alloc->res_free += dev->caps.reserved_mtts;
+ res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
+ res_alloc->quota[vf] += dev->caps.reserved_mtts;
+ }
+ }
+}
+
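
initialize_res_quotas() above splits each resource pool so that every function (the PF plus all VFs) gets a hard guarantee of num_instances / (2 * (num_vfs + 1)) and a quota of half the pool on top of that guarantee, so at most half the pool is ever pinned by guarantees. A quick worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	int num_vfs = 7;		/* example: 7 VFs plus the PF */
	int num_instances = 16384;	/* example: QPs left after reservations */
	int guaranteed = num_instances / (2 * (num_vfs + 1));
	int quota = num_instances / 2 + guaranteed;

	printf("per-function guarantee: %d\n", guaranteed);	/* 1024 */
	printf("per-function quota:     %d\n", quota);		/* 9216 */
	printf("sum of all guarantees:  %d (half the pool stays shared)\n",
	       guaranteed * (num_vfs + 1));			/* 8192 */
	return 0;
}
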
+void mlx4_init_quotas(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pf;
+
+ /* quotas for VFs are initialized in mlx4_slave_cap */
+ if (mlx4_is_slave(dev))
+ return;
+
+ if (!mlx4_is_mfunc(dev)) {
+ dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev);
+ dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
+ dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
+ dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
+ return;
+ }
+
+ pf = mlx4_master_func_num(dev);
+ dev->quotas.qp =
+ priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
+ dev->quotas.cq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
+ dev->quotas.srq =
+ priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
+ dev->quotas.mtt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
+ dev->quotas.mpt =
+ priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
+ int i, j;
int t;
priv->mfunc.master.res_tracker.slave_list =
@@ -298,8 +443,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ struct resource_allocator *res_alloc =
+ &priv->mfunc.master.res_tracker.res_alloc[i];
+ res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+ if (i == RES_MAC || i == RES_VLAN)
+ res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
+ (dev->num_vfs + 1) * sizeof(int),
+ GFP_KERNEL);
+ else
+ res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+
+ if (!res_alloc->quota || !res_alloc->guaranteed ||
+ !res_alloc->allocated)
+ goto no_mem_err;
+
+ spin_lock_init(&res_alloc->alloc_lock);
+ for (t = 0; t < dev->num_vfs + 1; t++) {
+ switch (i) {
+ case RES_QP:
+ initialize_res_quotas(dev, res_alloc, RES_QP,
+ t, dev->caps.num_qps -
+ dev->caps.reserved_qps -
+ mlx4_num_reserved_sqps(dev));
+ break;
+ case RES_CQ:
+ initialize_res_quotas(dev, res_alloc, RES_CQ,
+ t, dev->caps.num_cqs -
+ dev->caps.reserved_cqs);
+ break;
+ case RES_SRQ:
+ initialize_res_quotas(dev, res_alloc, RES_SRQ,
+ t, dev->caps.num_srqs -
+ dev->caps.reserved_srqs);
+ break;
+ case RES_MPT:
+ initialize_res_quotas(dev, res_alloc, RES_MPT,
+ t, dev->caps.num_mpts -
+ dev->caps.reserved_mrws);
+ break;
+ case RES_MTT:
+ initialize_res_quotas(dev, res_alloc, RES_MTT,
+ t, dev->caps.num_mtts -
+ dev->caps.reserved_mtts);
+ break;
+ case RES_MAC:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ res_alloc->guaranteed[t] = 2;
+ }
+ break;
+ case RES_VLAN:
+ if (t == mlx4_master_func_num(dev)) {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
+ res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_free[j] =
+ res_alloc->quota[t];
+ } else {
+ res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
+ res_alloc->guaranteed[t] = 0;
+ }
+ break;
+ case RES_COUNTER:
+ res_alloc->quota[t] = dev->caps.max_counters;
+ res_alloc->guaranteed[t] = 0;
+ if (t == mlx4_master_func_num(dev))
+ res_alloc->res_free = res_alloc->quota[t];
+ break;
+ default:
+ break;
+ }
+ if (i == RES_MAC || i == RES_VLAN) {
+ for (j = 0; j < MLX4_MAX_PORTS; j++)
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
+ } else {
+ res_alloc->res_reserved += res_alloc->guaranteed[t];
+ }
+ }
+ }
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
- return 0 ;
+ return 0;
+
+no_mem_err:
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
+ return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -309,13 +551,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
- if (type != RES_TR_FREE_STRUCTS_ONLY)
- for (i = 0 ; i < dev->num_slaves; i++)
+ if (type != RES_TR_FREE_STRUCTS_ONLY) {
+ for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
+ }
+ /* free master's vlans */
+ i = dev->caps.function;
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ rem_slave_vlans(dev, i);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
if (type != RES_TR_FREE_SLAVES_ONLY) {
+ for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
+ priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
+ priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
+ kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
+ priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
+ }
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
@@ -1229,12 +1486,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
case RES_OP_RESERVE:
count = get_param_l(&in_param);
align = get_param_h(&in_param);
- err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
@@ -1282,15 +1546,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
order = get_param_l(&in_param);
+
+ err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
+ if (err)
+ return err;
+
base = __mlx4_alloc_mtt_range(dev, order);
- if (base == -1)
+ if (base == -1) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
+ }
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
- if (err)
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
- else
+ } else {
set_param_l(out_param, base);
+ }
return err;
}
@@ -1305,13 +1578,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE:
+ err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
+ if (err)
+ break;
+
index = __mlx4_mpt_reserve(dev);
- if (index == -1)
+ if (index == -1) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
+ }
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
@@ -1345,12 +1625,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_cq_alloc_icm(dev, &cqn);
+ err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
@@ -1373,12 +1660,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- err = __mlx4_srq_alloc_icm(dev, &srqn);
+ err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
+ break;
+ }
+
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
@@ -1399,9 +1693,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct mac_res *res;
+ if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
+ return -EINVAL;
res = kzalloc(sizeof *res, GFP_KERNEL);
- if (!res)
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
+ }
res->mac = mac;
res->port = (u8) port;
list_add_tail(&res->list,
@@ -1421,6 +1719,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res);
break;
}
@@ -1438,12 +1737,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
__mlx4_unregister_mac(dev, res->port, res->mac);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
@@ -1452,7 +1752,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE_AND_MAP)
return err;
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
@@ -1469,12 +1769,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port, int vlan_index)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ /* vlan found. update ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
+
+ if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
+ return -EINVAL;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
+ return -ENOMEM;
+ }
+ res->vlan = vlan;
+ res->port = (u8) port;
+ res->vlan_index = vlan_index;
+ res->ref_count = 1;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
+
+static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ if (res->vlan == vlan && res->port == (u8) port) {
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_VLAN,
+ 1, port);
+ kfree(res);
+ }
+ break;
+ }
+ }
+}
+
+static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *vlan_list =
+ &tracker->slave_list[slave].res_list[RES_VLAN];
+ struct vlan_res *res, *tmp;
+ int i;
+
+ list_for_each_entry_safe(res, tmp, vlan_list, list) {
+ list_del(&res->list);
+		/* dereference the vlan as many times as the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_vlan(dev, res->port, res->vlan);
+ mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
+ kfree(res);
+ }
+}
+
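
vlan_add_to_slave()/vlan_del_from_slave() above track each (port, vlan) pair a slave registers and free the tracking entry only when the last reference is dropped, which is also why rem_slave_vlans() unregisters a vlan once per remaining reference. A rough userspace analog of that reference counting; the fixed-size table and names are illustrative only.

#include <stdio.h>

struct tracked_vlan {
	unsigned short vlan;
	unsigned char port;
	int ref_count;
	int in_use;
};

static struct tracked_vlan table[8];

static int vlan_track_add(unsigned char port, unsigned short vlan)
{
	int i, free_slot = -1;

	for (i = 0; i < 8; i++) {
		if (table[i].in_use && table[i].port == port &&
		    table[i].vlan == vlan)
			return ++table[i].ref_count;	/* already tracked */
		if (!table[i].in_use && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;				/* table full */
	table[free_slot] = (struct tracked_vlan){ vlan, port, 1, 1 };
	return 1;
}

static int vlan_track_del(unsigned char port, unsigned short vlan)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (table[i].in_use && table[i].port == port &&
		    table[i].vlan == vlan) {
			if (--table[i].ref_count == 0)
				table[i].in_use = 0;	/* last reference gone */
			return table[i].ref_count;
		}
	}
	return -1;					/* not tracked */
}

int main(void)
{
	printf("add: refs=%d\n", vlan_track_add(1, 100));	/* 1 */
	printf("add: refs=%d\n", vlan_track_add(1, 100));	/* 2 */
	printf("del: refs=%d\n", vlan_track_del(1, 100));	/* 1 */
	printf("del: refs=%d\n", vlan_track_del(1, 100));	/* 0, entry freed */
	return 0;
}
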
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int in_port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err;
+ u16 vlan;
+ int vlan_index;
+ int port;
+
+ port = !in_port ? get_param_l(out_param) : in_port;
+
+ if (!port || op != RES_OP_RESERVE_AND_MAP)
+ return -EINVAL;
+
+ /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
+ if (!in_port && port > 0 && port <= dev->caps.num_ports) {
+ slave_state[slave].old_vlan_api = true;
+ return 0;
+ }
+
+ vlan = (u16) in_param;
+
+ err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
+ if (!err) {
+ set_param_l(out_param, (u32) vlan_index);
+ err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
+ if (err)
+ __mlx4_unregister_vlan(dev, port, vlan);
+ }
+ return err;
+}
+
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
@@ -1484,15 +1886,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE)
return -EINVAL;
- err = __mlx4_counter_alloc(dev, &index);
+ err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
+ err = __mlx4_counter_alloc(dev, &index);
+ if (err) {
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ return err;
+ }
+
err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
- if (err)
+ if (err) {
__mlx4_counter_free(dev, index);
- else
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ } else {
set_param_l(out_param, index);
+ }
return err;
}
@@ -1528,7 +1938,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
int err;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
@@ -1556,12 +1966,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -1597,6 +2009,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
@@ -1634,8 +2047,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
- if (!err)
+ if (!err) {
+ mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
+ }
return err;
}
@@ -1660,6 +2075,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
@@ -1694,6 +2110,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
@@ -1718,6 +2135,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
break;
+ mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
@@ -1730,14 +2148,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
- port = get_param_l(out_param);
+ port = !in_port ? get_param_l(out_param) : in_port;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -1751,9 +2169,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
- return 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ if (slave_state[slave].old_vlan_api)
+ return 0;
+ if (!port)
+ return -EINVAL;
+ vlan_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_vlan(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1771,6 +2207,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
@@ -1803,7 +2240,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
int err = -EINVAL;
int alop = vhcr->op_modifier;
- switch (vhcr->in_modifier) {
+ switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
@@ -1831,12 +2268,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param,
+ (vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
@@ -2136,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
+ qp->param3 = 0;
+ qp->vlan_control = 0;
+ qp->fvl_rx = 0;
+ qp->pri_path_fl = 0;
+ qp->vlan_index = 0;
+ qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -2862,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
+ __be32 orig_param3 = qpc->param3;
+ u8 orig_vlan_control = qpc->pri_path.vlan_control;
+ u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
+ u8 orig_pri_path_fl = qpc->pri_path.fl;
+ u8 orig_vlan_index = qpc->pri_path.vlan_index;
+ u8 orig_feup = qpc->pri_path.feup;
err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
@@ -2889,9 +3340,15 @@ out:
* essentially the QOS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
- if (!err)
+ if (!err) {
qp->sched_queue = orig_sched_queue;
-
+ qp->param3 = orig_param3;
+ qp->vlan_control = orig_vlan_control;
+ qp->fvl_rx = orig_fvl_rx;
+ qp->pri_path_fl = orig_pri_path_fl;
+ qp->vlan_index = orig_vlan_index;
+ qp->feup = orig_feup;
+ }
put_res(dev, slave, qpn, RES_QP);
return err;
}
@@ -3498,6 +3955,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ if (!valid_reserved(dev, slave, qpn)) {
+ __mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_release_resource(dev, slave,
+ RES_QP, 1, 0);
+ }
kfree(qp);
state = 0;
break;
@@ -3569,6 +4031,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
@@ -3635,6 +4099,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
@@ -3698,6 +4164,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave,
+ RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
@@ -3767,6 +4235,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ mlx4_release_resource(dev, slave, RES_MTT,
+ 1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
@@ -3925,6 +4395,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
}
spin_unlock_irq(mlx4_tlock(dev));
@@ -3964,7 +4435,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
- /*VLAN*/
+ rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
@@ -3991,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
- u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
+ u64 qp_path_mask_vlan_ctrl =
+ ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
- (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
+
+ u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
@@ -4029,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
- upd_context->qp_context.pri_path.vlan_control = vlan_control;
- upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4049,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
spin_lock_irq(mlx4_tlock(dev));
continue;
}
- upd_context->qp_context.pri_path.sched_queue =
- qp->sched_queue & 0xC7;
- upd_context->qp_context.pri_path.sched_queue |=
- ((work->qos & 0x7) << 3);
+ if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
+ upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
+ else
+ upd_context->primary_addr_path_mask =
+ cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
+ if (work->vlan_id == MLX4_VGT) {
+ upd_context->qp_context.param3 = qp->param3;
+ upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
+ upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
+ upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
+ upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
+ upd_context->qp_context.pri_path.feup = qp->feup;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue;
+ } else {
+ upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
+ upd_context->qp_context.pri_path.vlan_control = vlan_control;
+ upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
+ upd_context->qp_context.pri_path.fvl_rx =
+ qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.fl =
+ qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ upd_context->qp_context.pri_path.feup =
+ qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
+ upd_context->qp_context.pri_path.sched_queue =
+ qp->sched_queue & 0xC7;
+ upd_context->qp_context.pri_path.sched_queue |=
+ ((work->qos & 0x7) << 3);
+ }
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
@@ -4081,7 +4582,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
- work->orig_vlan_ix);
+ work->orig_vlan_id);
out:
kfree(work);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 79fd269e2c54..8fdf23753779 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -188,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
}
srq_context = mailbox->buf;
- memset(srq_context, 0, sizeof *srq_context);
-
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 075f4e21d33d..c83d16dc7cd5 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
ks_wrreg16(ks, KS_MARL, w);
- memcpy(ks->mac_addr, data, 6);
+ memcpy(ks->mac_addr, data, ETH_ALEN);
if (ks->enabled)
ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
}
netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
+ memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
ks_set_mac(ks, netdev->dev_addr);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 8ebc352bcbe6..ddd252a3da9c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
struct platform_info *info = pci_get_drvdata(pdev);
struct dev_info *hw_priv = &info->dev_info;
- pci_set_drvdata(pdev, NULL);
-
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
static char pcidev_name[] = "ksz884xp";
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index ea54d95e5b9f..cbd013379252 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -26,7 +26,6 @@
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
#include "moxart_ether.h"
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 149355b52ad0..68026f7e8ba3 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
{
- int rc = true;
+ bool rc = true;
spin_lock_bh(&ss->lock);
if ((ss->state & SLICE_LOCKED)) {
ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
/* Walk the multicast list, and add each address */
netdev_for_each_mc_addr(ha, dev) {
- memcpy(data, &ha->addr, 6);
+ memcpy(data, &ha->addr, ETH_ALEN);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, 6);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return 0;
}
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 7a5e295588b0..64ec2a437f46 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
err_pci_request_regions:
free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 4da172ac5599..7007d212f3e4 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -264,6 +264,7 @@ int xtsonic_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
lp->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
netdev_boot_setup_check(dev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 51b00941302c..9eeddbd0b2c7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
free_shared_mem(sp);
pci_disable_device(pdev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
iounmap(sp->bar0);
iounmap(sp->bar1);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5a20eaf903dd..8614eeb7de81 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4739,7 +4739,6 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
pci_disable_sriov(pdev);
_exit3:
@@ -4782,7 +4781,6 @@ static void vxge_remove(struct pci_dev *pdev)
vxge_free_mac_add_list(&vdev->vpaths[i]);
vxge_device_unregister(hldev);
- pci_set_drvdata(pdev, NULL);
/* Do not call pci_disable_sriov here, as it will break child devices */
vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 622aa75904c4..1b326cbcd34b 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1545,7 +1545,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac))
+ if (mac)
memcpy(netdev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(netdev);
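The is_valid_ether_addr() test is dropped here because of_get_mac_address() is expected to return a pointer only when the device tree carries a valid MAC address, and NULL otherwise, making the extra check redundant. The probe-time pattern, assuming that helper behaviour:

    #include <linux/of_net.h>
    #include <linux/etherdevice.h>

    static void example_init_mac(struct net_device *netdev, struct device_node *np)
    {
            const void *mac = of_get_mac_address(np);

            if (mac)                                /* helper already validated it */
                    memcpy(netdev->dev_addr, mac, ETH_ALEN);
            else
                    eth_hw_addr_random(netdev);     /* fall back to a random MAC */
    }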
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 6797b1075874..2a9003071d51 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
extern const char pch_driver_version[];
/* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
- struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
/* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
/* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
- u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data);
#endif /* _PCH_GBE_H_ */
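The pch_gbe prototypes (and the netxen ones further down) lose their leading extern. For function declarations extern is implicit, so the two spellings below are equivalent and the shorter form is simply less noise:

    /* These declare exactly the same symbol: */
    extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
    int pch_gbe_up(struct pch_gbe_adapter *adapter);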
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index cac33e5f9bc2..b6bdeb3c1971 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
iounmap(hmp->base);
free_netdev(dev);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d28593b1fc3e..07a890eb72ad 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -513,7 +513,6 @@ err_out_unmap_rx:
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
pci_release_regions (pdev);
free_netdev (dev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 5b65356e7568..dbaa49e58b0c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
pasemi_dma_free_chan(&mac->tx->chan);
pasemi_dma_free_chan(&mac->rx->chan);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 32675e16021e..9adcdbb49476 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
- int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
extern const struct ethtool_ops netxen_nic_ethtool_ops;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 32c790659f9c..0c64c82b9acf 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -958,6 +958,7 @@ enum {
#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
/* MiniDIMM related macros */
#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 8375cbde9969..67efe754367d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
mac_req = (nx_mac_req_t *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cbd75f97ffb3..3bec8cfebf99 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
return 0;
}
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once for an adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_msi;
}
+ netxen_read_ula_info(adapter);
+
err = netxen_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 91a8fcd6c246..0758b9435358 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
iounmap(qdev->mem_map_registers);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81bf83604c4f..631ea0ac1cd8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 52
+#define QLCNIC_LINUX_VERSIONID "5.3.52"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,8 +98,22 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-#define QLCNIC_MAX_TX_RINGS 8
-#define QLCNIC_MAX_SDS_RINGS 8
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
/*
* Following are the states of the Phantom. Phantom will set them and
@@ -533,6 +547,14 @@ struct qlcnic_host_sds_ring {
char name[IFNAMSIZ + 12];
} ____cacheline_internodealigned_in_smp;
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
struct qlcnic_host_tx_ring {
int irq;
void __iomem *crb_intr_mask;
@@ -544,10 +566,7 @@ struct qlcnic_host_tx_ring {
u32 sw_consumer;
u32 num_desc;
- u64 xmit_on;
- u64 xmit_off;
- u64 xmit_called;
- u64 xmit_finished;
+ struct qlcnic_tx_queue_stats tx_stats;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
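The per-ring Tx counters (xmit_on, xmit_off, xmit_called, xmit_finished, plus a new tx_bytes) move into struct qlcnic_tx_queue_stats, leaving the ring with a single tx_stats member. Grouping them lets later code clear or copy the counters as one unit; a small sketch with an illustrative helper name:

    /* Reset a ring's Tx counters in one shot now that they share a struct. */
    static void example_clear_tx_stats(struct qlcnic_host_tx_ring *ring)
    {
            memset(&ring->tx_stats, 0, sizeof(ring->tx_stats));
    }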
@@ -940,8 +959,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_EANBLE 0xC
#define QLCNIC_BEACON_DISABLE 0xD
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
-#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -961,8 +978,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
-#define __QLCNIC_DCB_STATE 14
-#define __QLCNIC_DCB_IN_AEN 15
+#define __QLCNIC_MAINTENANCE_MODE 16
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -1013,7 +1029,6 @@ struct qlcnic_adapter {
unsigned long state;
u32 flags;
- int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
@@ -1021,7 +1036,13 @@ struct qlcnic_adapter {
u16 max_jumbo_rxd;
u8 max_rds_rings;
- u8 max_sds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
u8 rx_csum;
u8 portnum;
@@ -1199,6 +1220,7 @@ struct qlcnic_npar_info {
u8 promisc_mode;
u8 offload_flags;
u8 pci_func;
+ u8 mac[ETH_ALEN];
};
struct qlcnic_eswitch {
@@ -1543,12 +1565,13 @@ int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
-int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1641,19 +1664,18 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int err, tx_q;
-
- tx_q = adapter->max_drv_tx_rings;
+ int err;
- netdev->num_tx_queues = tx_q;
- netdev->real_num_tx_queues = tx_q;
+ netdev->num_tx_queues = adapter->drv_tx_rings;
+ netdev->real_num_tx_queues = adapter->drv_tx_rings;
- err = netif_set_real_num_tx_queues(netdev, tx_q);
+ err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
if (err)
dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
- tx_q);
+ adapter->drv_tx_rings);
else
- dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+ dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
+ adapter->drv_tx_rings);
return err;
}
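qlcnic_set_real_num_queues() now reads adapter->drv_tx_rings directly instead of copying it into a local. The underlying contract is unchanged: the queue counts stored on the netdev and the value passed to netif_set_real_num_tx_queues() must agree on how many Tx queues the stack may use. Restated as a sketch:

    static int example_set_tx_queues(struct net_device *netdev, u8 count)
    {
            int err;

            netdev->num_tx_queues = count;
            netdev->real_num_tx_queues = count;

            err = netif_set_real_num_tx_queues(netdev, count);
            if (err)
                    netdev_err(netdev, "failed to set %d Tx queues\n", count);
            return err;
    }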
@@ -1695,7 +1717,7 @@ struct qlcnic_hardware_ops {
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
- int (*setup_intr) (struct qlcnic_adapter *, u8, int);
+ int (*setup_intr) (struct qlcnic_adapter *);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1766,10 +1788,9 @@ static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
- u8 num_intr, int txq)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
+ return adapter->ahw->hw_ops->setup_intr(adapter);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -2005,7 +2026,7 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
{
test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
/* When operating in a multi tx mode, driver needs to write 0x1
@@ -2115,98 +2136,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
return status;
}
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_hw_capability)
- return dcb->ops->get_hw_capability(adapter);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->free)
- dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->attach)
- return dcb->ops->attach(adapter);
-
- return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_hw_capability)
- return dcb->ops->query_hw_capability(adapter, buf);
-
- return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_info)
- dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->query_cee_param)
- return dcb->ops->query_cee_param(adapter, buf, type);
-
- return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->get_cee_cfg)
- return dcb->ops->get_cee_cfg(adapter);
-
- return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->register_aen)
- dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *msg)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->handle_aen)
- dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (dcb && dcb->ops->init_dcbnl_ops)
- dcb->ops->init_dcbnl_ops(adapter);
-}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index ace217c447dd..09810ddd11ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/aer.h>
-#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
@@ -268,20 +267,18 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
+ num_msix = adapter->drv_sds_rings;
+
/* account for AEN interrupt MSI-X based interrupts */
num_msix += 1;
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- num_msix += adapter->max_drv_tx_rings;
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
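With the ring counts fixed up front, qlcnic_83xx_setup_intr() derives its MSI-X budget directly from them: one vector per SDS ring, one for AEN/mailbox events, and one per Tx ring when Tx interrupts are not shared. The arithmetic as a standalone sketch:

    /* MSI-X vectors requested by the 83xx setup path (sketch). */
    static int example_msix_count(const struct qlcnic_adapter *adapter)
    {
            int num_msix = adapter->drv_sds_rings;  /* one per SDS (Rx status) ring */

            num_msix += 1;                          /* AEN / mailbox vector */
            if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
                    num_msix += adapter->drv_tx_rings;      /* one per Tx ring */

            return num_msix;
    }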
@@ -325,7 +322,8 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
{
- writel(1, adapter->tgt_mask_reg);
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
}
/* Enable MSI-x and INT-x interrupts */
@@ -498,8 +496,11 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
num_msix = 0;
msleep(20);
- synchronize_irq(adapter->msix_entries[num_msix].vector);
- free_irq(adapter->msix_entries[num_msix].vector, adapter);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
}
int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
@@ -760,6 +761,9 @@ int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
int cmd_type, err, opcode;
unsigned long timeout;
+ if (!mbx)
+ return -EIO;
+
opcode = LSW(cmd->req.arg[0]);
cmd_type = cmd->type;
err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
@@ -902,7 +906,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
QLCNIC_MBX_RSP(event[0]));
break;
case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
- qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -979,14 +983,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
- num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
index = 2;
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
memset(&sds_mbx, 0, sds_mbx_size);
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
@@ -1021,7 +1025,7 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
index = 0;
/* status descriptor ring */
- for (i = 8; i < adapter->max_sds_rings; i++) {
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
sds = &recv_ctx->sds_rings[i];
sds->crb_sts_consumer = ahw->pci_base0 +
mbx_out->host_csmr[index];
@@ -1079,10 +1083,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
num_rds = adapter->max_rds_rings;
- if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
- num_sds = adapter->max_sds_rings;
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
else
- num_sds = QLCNIC_MAX_RING_SETS;
+ num_sds = QLCNIC_MAX_SDS_RINGS;
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
@@ -1183,7 +1187,7 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
- if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
err = qlcnic_83xx_add_rings(adapter);
out:
qlcnic_free_mbx_args(&cmd);
@@ -1239,9 +1243,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx.size = tx->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
- msix_vector = adapter->max_sds_rings + ring;
+ msix_vector = adapter->drv_sds_rings + ring;
else
- msix_vector = adapter->max_sds_rings - 1;
+ msix_vector = adapter->drv_sds_rings - 1;
msix_id = ahw->intr_tbl[msix_vector].id;
} else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
@@ -1264,7 +1268,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command*/
@@ -1279,7 +1284,7 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
tx->ctx_id = mbx_out->ctx_id;
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
@@ -1290,7 +1295,7 @@ out:
}
static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
- int num_sds_ring)
+ u8 num_sds_ring)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1306,7 +1311,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -1320,7 +1325,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (ret) {
qlcnic_detach(adapter);
if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
- adapter->max_sds_rings = num_sds_ring;
+ adapter->drv_sds_rings = num_sds_ring;
qlcnic_attach(adapter);
}
netif_device_attach(netdev);
@@ -1333,7 +1338,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
@@ -1354,7 +1359,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
- int max_sds_rings)
+ u8 drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
@@ -1362,7 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -1386,7 +1391,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
}
}
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1648,7 +1653,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
@@ -1670,7 +1677,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
mode == QLCNIC_ILB_MODE ? "internal" : "external");
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_alloc;
@@ -1708,10 +1715,11 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_83xx_clear_lb_mode(adapter, mode);
free_diag_res:
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_alloc:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -2321,19 +2329,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
i++;
memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
i = i + 3;
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "id = %d active = %d type = %d\n"
- "\tport = %d min bw = %d max bw = %d\n"
- "\tmac_addr = %pM\n", pci_info->id,
- pci_info->active, pci_info->type,
- pci_info->default_port,
- pci_info->tx_min_bw,
- pci_info->tx_max_bw, pci_info->mac);
}
- if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max functions = %d, active functions = %d\n",
- ahw->max_pci_func, ahw->act_pci_func);
-
} else {
dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
err = -EIO;
@@ -3061,11 +3057,14 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
int status = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* Get port configuration info */
- status = qlcnic_83xx_get_port_info(adapter);
- /* Get Link Status related info */
- config = qlcnic_83xx_test_link(adapter);
- ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
/* hard code until there is a way to get it from flash */
ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
@@ -3279,12 +3278,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
{
return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
- sizeof(adapter->ahw->ext_reg_tbl)) +
- (ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
- sizeof(adapter->ahw->reg_tbl));
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
}
int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
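The get_regs_len fix corrects two related slips: sizeof(adapter->ahw->ext_reg_tbl) measured the pointer, not a table entry, and the second term added where it should have multiplied. The intended value is entry count times entry size for both tables, i.e.:

    /* Bytes needed to dump a register table: note sizeof(*tbl), one element,
     * rather than sizeof(tbl), which is only the size of the pointer. */
    static size_t example_regs_len(const u32 *tbl, size_t nentries)
    {
            return nentries * sizeof(*tbl);
    }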
@@ -3305,10 +3304,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
u32 data;
u16 intrpt_id, id;
- u8 val;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret;
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev, "Device is resetting\n");
@@ -3321,7 +3321,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
}
ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
- max_sds_rings);
+ drv_sds_rings);
if (ret)
goto fail_diag_irq;
@@ -3358,10 +3358,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
- qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
qlcnic_release_diag_lock(adapter);
return ret;
}
@@ -3381,10 +3382,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
}
config = ahw->port_config;
if (config & QLC_83XX_CFG_STD_PAUSE) {
- if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
pause->tx_pause = 1;
- if (config & QLC_83XX_CFG_STD_RX_PAUSE)
pause->rx_pause = 1;
+ }
}
if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3439,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
} else if (!pause->rx_pause && !pause->tx_pause) {
- ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
}
status = qlcnic_83xx_set_port_config(adapter);
if (status) {
@@ -3503,7 +3516,7 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
if (err)
return err;
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
qlcnic_83xx_set_vnic_opmode(adapter);
} else {
@@ -3530,6 +3543,9 @@ void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
{
+ if (!mbx)
+ return;
+
destroy_workqueue(mbx->work_q);
kfree(mbx);
}
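qlcnic_83xx_free_mailbox() and qlcnic_83xx_detach_mailbox_work() now tolerate a NULL mailbox, and the init error path further down in qlcnic_83xx_init.c sets ahw->mailbox = NULL after freeing it, so a failed probe cannot leave a dangling pointer for later teardown to dereference. The general shape, sketched with an illustrative helper:

    static void example_free_mbx(struct qlcnic_hardware_context *ahw)
    {
            if (!ahw->mailbox)              /* never allocated, or already freed */
                    return;

            destroy_workqueue(ahw->mailbox->work_q);
            kfree(ahw->mailbox);
            ahw->mailbox = NULL;            /* later callers see the NULL and bail */
    }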
@@ -3650,6 +3666,9 @@ void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ if (!mbx)
+ return;
+
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
complete(&mbx->completion);
cancel_work_sync(&mbx->work);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 533e150503af..4cae6caa6bfa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -61,7 +61,6 @@
#define QLC_83XX_HOST_SDS_MBX_IDX 8
#define QLCNIC_HOST_RDS_MBX_IDX 88
-#define QLCNIC_MAX_RING_SETS 8
/* Pause control registers */
#define QLC_83XX_SRE_SHIM_REG 0x0D200284
@@ -183,8 +182,8 @@ struct qlcnic_rcv_mbx_out {
u8 num_pci_func;
u8 state;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
struct qlcnic_add_rings_mbx_out {
@@ -197,8 +196,8 @@ struct qlcnic_add_rings_mbx_out {
u8 sts_num;
u8 rcv_num;
#endif
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
} __packed;
/* Transmit context mailbox inbox registers
@@ -363,6 +362,9 @@ enum qlcnic_83xx_states {
#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
@@ -412,8 +414,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
-#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
-#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
@@ -521,7 +521,7 @@ enum qlc_83xx_ext_regs {
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -626,7 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f09e787af0b2..89208e5b25d6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_dump_fw(adapter);
}
return -EIO;
}
@@ -897,7 +902,7 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
- if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
if (qlcnic_check_diag_status(adapter)) {
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
+ /* Do not reschedule if firmware is in hung state and auto
+ * recovery is disabled
+ */
+ if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+ return;
+
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2022,6 +2033,8 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = ahw->max_rx_ques;
/* eSwitch capability indicates vNIC mode.
* vNIC and SRIOV are mutually exclusive operational modes.
* If SR-IOV capability is detected, SR-IOV physical function
@@ -2034,7 +2047,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
return QLC_83XX_DEFAULT_OPMODE;
if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
- return QLC_83XX_VIRTUAL_NIC_MODE;
+ return QLCNIC_VNIC_MODE;
return QLC_83XX_DEFAULT_OPMODE;
}
@@ -2048,15 +2061,20 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
if (ret == -EIO)
return -EIO;
- if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ adapter->max_sds_rings = ahw->max_rx_ques;
+ adapter->max_tx_rings = ahw->max_tx_ques;
} else {
return -EIO;
}
@@ -2159,13 +2177,34 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
return err;
}
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+ /* compute and set driver Tx and SDS ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_dcb *dcb;
int err = 0;
ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ qlcnic_83xx_init_rings(adapter);
+
err = qlcnic_83xx_init_mailbox_work(adapter);
if (err)
goto exit;
@@ -2178,22 +2217,26 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
return err;
}
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
err = qlcnic_83xx_check_hw_status(adapter);
if (err)
goto detach_mbx;
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
err = qlcnic_83xx_get_fw_info(adapter);
if (err)
goto detach_mbx;
err = qlcnic_83xx_idc_init(adapter);
if (err)
- goto clear_fw_info;
+ goto detach_mbx;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto disable_intr;
@@ -2215,13 +2258,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
goto disable_mbx_intr;
+
/* Perform operating mode specific initialization */
err = adapter->nic_ops->init_driver(adapter);
if (err)
goto disable_mbx_intr;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
@@ -2233,12 +2279,10 @@ disable_mbx_intr:
disable_intr:
qlcnic_teardown_intr(adapter);
-clear_fw_info:
- kfree(ahw->fw_info);
-
detach_mbx:
qlcnic_83xx_detach_mailbox_work(adapter);
qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
exit:
return err;
}
@@ -2251,7 +2295,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
clear_bit(QLC_83XX_MBX_READY, &idc->status);
cancel_delayed_work_sync(&adapter->fw_work);
- if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
qlcnic_83xx_idc_detach_driver(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 0248a4c2f5dd..734d28602ac3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
{
- int err = -EIO;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
qlcnic_83xx_get_minidump_template(adapter);
+
if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
if (qlcnic_init_pci_info(adapter))
return err;
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->act_pci_func);
+
if (qlcnic_83xx_set_vnic_opmode(adapter))
return err;
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
return err;
qlcnic_83xx_config_vnic_buff_descriptors(adapter);
- adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
qlcnic_83xx_enable_vnic_mode(adapter, 1);
- dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
- adapter->ahw->fw_hal_version);
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
return 0;
}
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
- int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
{
struct qlcnic_info nic_info;
int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
else
err = -EIO;
- return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
- int id, err = 0;
-
- err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
- if (err)
- return err;
-
- if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
- if (!qlcnic_enable_eswitch(adapter, id, 1))
- adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
- else
- err = -EIO;
- }
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 86850dd633a1..859cb161fc63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -270,7 +270,7 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
int err;
nrds_rings = adapter->max_rds_rings;
- nsds_rings = adapter->max_sds_rings;
+ nsds_rings = adapter->drv_sds_rings;
rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
nsds_rings);
@@ -475,7 +475,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- temp_nsds_rings = adapter->max_sds_rings;
+ temp_nsds_rings = adapter->drv_sds_rings;
index = temp_nsds_rings + ring;
msix_id = ahw->intr_tbl[index].id;
prq->msi_index = cpu_to_le16(msix_id);
@@ -512,7 +512,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
intr_mask = ahw->intr_tbl[index].src;
tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
@@ -582,7 +582,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
@@ -616,7 +616,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
@@ -664,7 +664,7 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
if (err)
goto err_out;
- for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
err = qlcnic_fw_cmd_create_tx_ctx(dev,
&dev->tx_ring[ring],
ring);
@@ -703,7 +703,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_del_rx_ctx(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
@@ -733,7 +733,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->hw_consumer != NULL) {
dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
@@ -764,7 +764,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
@@ -895,6 +895,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ adapter->max_tx_rings = npar_info->max_tx_ques;
+ adapter->max_sds_rings = npar_info->max_rx_ques;
}
qlcnic_free_mbx_args(&cmd);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index d62d5ce432ec..86bca7c14f99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
static void qlcnic_dcb_aen_work(struct work_struct *);
static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
-
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
-
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
struct qlcnic_dcb_capability {
bool tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
.query_cee_param = qlcnic_83xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
.register_aen = qlcnic_83xx_dcb_register_aen,
- .handle_aen = qlcnic_83xx_dcb_handle_aen,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
};
static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
.get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
.query_cee_param = qlcnic_82xx_dcb_query_cee_param,
.get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
- .handle_aen = qlcnic_82xx_dcb_handle_aen,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
};
static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
return j;
}
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
{
- if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
- adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
}
static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
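From here on, the DCB callbacks and their callers are reworked to take a struct qlcnic_dcb * instead of the adapter, with the DCB state bits (QLCNIC_DCB_STATE, QLCNIC_DCB_AEN_MODE) moving into dcb->state; the AEN path in qlcnic_83xx_hw.c above already calls qlcnic_dcb_aen_handler(adapter->dcb, ...). The wrapper shape this implies, presumably living in a DCB header not shown here, would be a null-safe forwarder along these lines:

    /* Sketch: no-op when DCB was never registered for this adapter. */
    static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
    {
            if (dcb && dcb->ops->aen_handler)
                    dcb->ops->aen_handler(dcb, msg);
    }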
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
}
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{
struct qlcnic_dcb *dcb;
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
adapter->dcb = dcb;
dcb->adapter = adapter;
qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
return 0;
}
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
+ struct qlcnic_adapter *adapter;
if (!dcb)
return;
- qlcnic_dcb_register_aen(adapter, 0);
+ adapter = dcb->adapter;
+ qlcnic_dcb_register_aen(dcb, 0);
- while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
usleep_range(10000, 11000);
cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
adapter->dcb = NULL;
}
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
{
- qlcnic_dcb_get_hw_capability(adapter);
- qlcnic_dcb_get_cee_cfg(adapter);
- qlcnic_dcb_register_aen(adapter, 1);
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ qlcnic_dcb_register_aen(dcb, 1);
}
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err = 0;
INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
if (!dcb->wq) {
- dev_err(&adapter->pdev->dev,
+ dev_err(&dcb->adapter->pdev->dev,
"DCB workqueue allocation failed. DCB will be disabled\n");
return -1;
}
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
goto out_free_cfg;
}
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(dcb);
return 0;
out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
return err;
}
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
- char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
u32 mbx_out;
int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
return err;
}
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
- err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
if (err)
return err;
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
if (cap->max_num_tc > QLC_DCB_MAX_TC ||
cap->max_ets_tc > cap->max_num_tc ||
cap->max_pfc_tc > cap->max_num_tc) {
- dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
return -EINVAL;
}
return err;
}
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
struct qlcnic_dcb_capability *cap;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 type)
{
u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
struct device *dev = &adapter->pdev->dev;
dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
return -EINVAL;
}
- addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
@@ -488,72 +489,67 @@ out:
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
- dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
return err;
}
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
struct qlcnic_dcb_mbx_params *mbx;
int err;
- mbx = adapter->dcb->param;
+ mbx = dcb->param;
if (!mbx)
return 0;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
QLC_DCB_LOCAL_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
QLC_DCB_OPER_PARAM_FWID);
if (err)
return err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
QLC_DCB_PEER_PARAM_FWID);
if (err)
return err;
mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
static void qlcnic_dcb_aen_work(struct work_struct *work)
{
- struct qlcnic_adapter *adapter;
struct qlcnic_dcb *dcb;
dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
- adapter = dcb->adapter;
- qlcnic_dcb_get_cee_cfg(adapter);
- clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
}
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
-
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
u32 mbx_out;
int err;
- err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
if (err)
return err;
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
return err;
}
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
char *buf, u8 idx)
{
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_dcb_mbx_params mbx_out;
int err, i, j, k, max_app, size;
struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
return err;
}
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
int err;
- err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
if (err)
return err;
- qlcnic_dcb_data_cee_param_map(adapter);
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
return err;
}
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
- bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
{
u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_adapter *adapter = dcb->adapter;
struct qlcnic_cmd_args cmd;
int err;
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
return err;
}
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
- void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
{
- struct qlcnic_dcb *dcb = adapter->dcb;
u32 *val = data;
- if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
return;
if (*val & BIT_8)
- set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
else
- clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
}
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
}
static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
{
- memcpy(addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
}
static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
*prio = *pgid = *bw_per = *up_tc_map = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
*bw_pct = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->tc_param_valid)
return;
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
*setting = 0;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
!type->pfc_mode_enable)
return;
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return -EINVAL;
switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
.protocol = id,
};
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb *dcb = adapter->dcb;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
return 0;
return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_dcb_cee *type;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 1;
type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
*app_count = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
struct qlcnic_dcb_app *app;
int i, j;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
struct qlcnic_dcb_cee *peer;
u8 i, j, k, map;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
pfc->pfc_en = 0;
- if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
return 0;
peer = &cfg->type[QLC_DCB_PEER_IDX];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index b87ce9fb503e..c04ae0cdc108 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -8,26 +8,29 @@
#ifndef __QLCNIC_DCBX_H
#define __QLCNIC_DCBX_H
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
#ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
#else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
{ return 0; }
#endif
+struct qlcnic_dcb;
+
struct qlcnic_dcb_ops {
- void (*init_dcbnl_ops) (struct qlcnic_adapter *);
- void (*free) (struct qlcnic_adapter *);
- int (*attach) (struct qlcnic_adapter *);
- int (*query_hw_capability) (struct qlcnic_adapter *, char *);
- int (*get_hw_capability) (struct qlcnic_adapter *);
- void (*get_info) (struct qlcnic_adapter *);
- int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
- int (*get_cee_cfg) (struct qlcnic_adapter *);
- int (*register_aen) (struct qlcnic_adapter *, bool);
- void (*handle_aen) (struct qlcnic_adapter *, void *);
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ int (*register_aen) (struct qlcnic_dcb *, bool);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
};
struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
struct workqueue_struct *wq;
struct qlcnic_dcb_ops *ops;
struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+ kfree(dcb);
+ dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ff83a9fcd4c5..b36c02fafcfd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -27,43 +27,36 @@ static const u32 qlcnic_fw_dump_level[] = {
};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"xmit_called", QLC_SIZEOF(stats.xmitcalled),
- QLC_OFF(stats.xmitcalled)},
+ QLC_OFF(stats.xmitcalled)},
{"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
- QLC_OFF(stats.xmitfinished)},
- {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ QLC_OFF(stats.xmitfinished)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
{"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
{"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
- {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
- QLC_OFF(stats.rx_dma_map_error)},
- {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
- QLC_OFF(stats.tx_dma_map_error)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
- QLC_OFF(stats.mac_filter_limit_overrun)},
+ QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
- "rx unicast frames",
- "rx multicast frames",
- "rx broadcast frames",
- "rx dropped frames",
- "rx errors",
- "rx local frames",
- "rx numbytes",
"tx unicast frames",
"tx multicast frames",
"tx broadcast frames",
@@ -71,6 +64,13 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx errors",
"tx local frames",
"tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
};
static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
@@ -126,13 +126,16 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"xmit_on",
"xmit_off",
"xmit_called",
"xmit_finished",
+ "tx_bytes",
};
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -187,8 +190,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
return -1;
}
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_TX_INTR_NOT_CONFIGURED	0x78563412
+
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
@@ -219,7 +222,15 @@ static const u32 ext_diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_ETHTOOL_REGS_VER 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
static int qlcnic_get_regs_len(struct net_device *dev)
{
@@ -231,7 +242,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
else
len = sizeof(ext_diag_registers) + sizeof(diag_registers);
- return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +506,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
u32 *regs_buff = p;
int ring, i = 0;
@@ -512,21 +527,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
- regs_buff[i++] = 1; /* No. of tx ring */
- regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
- regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
- regs_buff[i++] = 2; /* No. of rx ring */
- regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
- regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+ /* Marker btw regs and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
- regs_buff[i++] = adapter->max_sds_rings;
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
}
}
@@ -635,46 +664,88 @@ qlcnic_set_ringparam(struct net_device *dev,
return qlcnic_reset_context(adapter);
}
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver SDS rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (qlcnic_82xx_check(adapter) &&
+ (tx_ring > adapter->max_tx_rings)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+
+ if (qlcnic_83xx_check(adapter) &&
+ (tx_ring > QLCNIC_SINGLE_RING)) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
+ tx_ring, QLCNIC_SINGLE_RING);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int min;
-
- min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
- channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
- channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->max_drv_tx_rings;
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
+ struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
- int txq = 0;
if (channel->other_count || channel->combined_count)
return -EINVAL;
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
if (channel->rx_count) {
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
return err;
+ }
}
- if (qlcnic_82xx_check(adapter) && channel->tx_count) {
- err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
- if (err)
+ if (channel->tx_count) {
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
return err;
- txq = channel->tx_count;
+ }
}
- err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
- netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
- adapter->max_sds_rings, adapter->max_drv_tx_rings);
+ err = qlcnic_setup_rings(adapter, channel->rx_count,
+ channel->tx_count);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
return err;
}
@@ -876,7 +947,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
- int ret, max_sds_rings = adapter->max_sds_rings;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -905,10 +976,10 @@ done:
qlcnic_free_mbx_args(&cmd);
free_diag_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_diag_irq:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -984,8 +1055,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_drv_tx_rings = adapter->max_drv_tx_rings;
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
@@ -1040,11 +1111,11 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
qlcnic_clear_lb_mode(adapter, mode);
free_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_it:
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_drv_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1097,11 +1168,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
- for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
for (index = 0; index < num_stats; index++) {
- sprintf(data, "tx_ring_%d %s", i,
- qlcnic_tx_ring_stats_strings[index]);
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
data += ETH_GSTRING_LEN;
}
}
@@ -1199,6 +1270,36 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
return data;
}
+static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
+ adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
+ adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+ }
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
+}
+
static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1206,19 +1307,20 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size, ring;
+ int index, ret, length, size, tx_size, ring;
char *p;
- memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+
+ memset(data, 0, tx_size * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
tx_ring = &adapter->tx_ring[ring];
- *data++ = tx_ring->xmit_on;
- *data++ = tx_ring->xmit_off;
- *data++ = tx_ring->xmit_called;
- *data++ = tx_ring->xmit_finished;
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
}
}
+
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1260,7 +1362,7 @@ static int qlcnic_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
@@ -1318,7 +1420,7 @@ static int qlcnic_set_led(struct net_device *dev,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(dev, max_sds_rings);
+ qlcnic_diag_free_res(dev, drv_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
@@ -1659,7 +1761,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
bool valid_mask = false;
int i, ret = 0;
- u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
@@ -1712,9 +1813,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- netdev_info(netdev, "Device in FAILED state\n");
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index b64e2bef9428..6f7f60c09f07 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req = (struct qlcnic_mac_req *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 272c356cf9b2..13303e7d1ed7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -146,6 +146,12 @@ struct qlcnic_mailbox_metadata {
#define QLCNIC_MBX_PORT_RSP_OK 0x1a
#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
struct qlcnic_pci_info;
struct qlcnic_info;
struct qlcnic_cmd_args;
@@ -176,7 +182,7 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 66c26cf7a2b8..e9c21e5d0ca9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -236,7 +236,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
spin_lock_init(&rds_ring->lock);
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 11b4bb83b930..0149c9495347 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,10 +581,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
- if (qlcnic_check_multi_tx(adapter))
- tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
- else
- tx_ring = &adapter->tx_ring[0];
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -607,8 +604,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_start_queue(tx_ring->txq);
} else {
- adapter->stats.xmit_off++;
- tx_ring->xmit_off++;
+ tx_ring->tx_stats.xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -669,9 +665,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
- adapter->stats.txbytes += skb->len;
- adapter->stats.xmitcalled++;
- tx_ring->xmit_called++;
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -789,6 +784,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -805,8 +803,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
- adapter->stats.xmitfinished++;
- tx_ring->xmit_finished++;
+ tx_ring->tx_stats.xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -823,8 +820,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_tx_wake_queue(tx_ring->txq);
- adapter->stats.xmit_on++;
- tx_ring->xmit_on++;
+ tx_ring->tx_stats.xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -844,6 +840,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -1011,7 +1008,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
break;
case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
- qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
break;
default:
break;
@@ -1463,18 +1460,18 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
NAPI_POLL_WEIGHT);
} else {
- if (ring == (adapter->max_sds_rings - 1))
+ if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_poll,
NAPI_POLL_WEIGHT);
@@ -1491,7 +1488,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
}
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
NAPI_POLL_WEIGHT);
@@ -1508,7 +1505,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -1516,7 +1513,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_sds_rings(adapter->recv_ctx);
if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
@@ -1535,7 +1532,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
@@ -1544,8 +1541,8 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
if (qlcnic_check_multi_tx(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1563,7 +1560,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
napi_synchronize(&sds_ring->napi);
@@ -1573,7 +1570,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!adapter->ahw->diag_test &&
qlcnic_check_multi_tx(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_disable_tx_int(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1911,7 +1908,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1920,7 +1917,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
@@ -1938,7 +1935,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED)
qlcnic_83xx_disable_intr(adapter, sds_ring);
@@ -1948,7 +1945,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
napi_synchronize(&tx_ring->napi);
@@ -1965,10 +1962,10 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
return -ENOMEM;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
@@ -1994,7 +1991,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
@@ -2012,7 +2009,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
@@ -2021,7 +2018,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d8f4897e9e82..05c1eef8df13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -548,36 +548,75 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.io_resume = qlcnic_82xx_io_resume,
};
-static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int num_tx_q;
- if (ahw->msix_supported &&
+ if (qlcnic_82xx_check(adapter) &&
(ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
- num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
- num_online_cpus());
- if (num_tx_q > 1) {
- test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
- &adapter->state);
- adapter->max_drv_tx_rings = num_tx_q;
- }
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
} else {
- adapter->max_drv_tx_rings = 1;
+ return 1;
}
}
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+ /* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
+ adapter->drv_tx_rings);
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+ /* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+
+ dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
+ adapter->drv_sds_rings);
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int max_tx_rings, max_sds_rings, tx_vector;
+ int drv_tx_rings, drv_sds_rings, tx_vector;
int err = -1, i;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
- max_tx_rings = 0;
+ drv_tx_rings = 0;
tx_vector = 0;
} else {
- max_tx_rings = adapter->max_drv_tx_rings;
+ drv_tx_rings = adapter->drv_tx_rings;
tx_vector = 1;
}
@@ -589,7 +628,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return -ENOMEM;
}
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
if (adapter->ahw->msix_supported) {
@@ -602,18 +641,18 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mail box and tx ring vectors */
- adapter->max_sds_rings = num_msix -
- max_tx_rings - 1;
+ adapter->drv_sds_rings = num_msix -
+ drv_tx_rings - 1;
} else {
adapter->ahw->num_msix = num_msix;
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test &&
- (adapter->max_drv_tx_rings > 1))
- max_sds_rings = num_msix - max_tx_rings;
+ (adapter->drv_tx_rings > 1))
+ drv_sds_rings = num_msix - drv_tx_rings;
else
- max_sds_rings = num_msix;
+ drv_sds_rings = num_msix;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -624,13 +663,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (max_tx_rings + 1);
+ err -= drv_tx_rings + 1;
num_msix = rounddown_pow_of_two(err);
- num_msix += (max_tx_rings + 1);
+ num_msix += drv_tx_rings + 1;
} else {
num_msix = rounddown_pow_of_two(err);
if (qlcnic_check_multi_tx(adapter))
- num_msix += max_tx_rings;
+ num_msix += drv_tx_rings;
}
if (num_msix) {
@@ -683,25 +722,14 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
- if (!num_intr)
- num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = adapter->drv_sds_rings;
- if (ahw->msix_supported) {
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- num_intr));
- if (qlcnic_check_multi_tx(adapter)) {
- if (txq)
- adapter->max_drv_tx_rings = txq;
- num_msix += adapter->max_drv_tx_rings;
- }
- } else {
- num_msix = 1;
- }
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
@@ -819,7 +847,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
- int i, ret = 0, j = 0;
+ int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
@@ -860,7 +888,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
continue;
if (qlcnic_port_eswitch_cfg_capability(adapter)) {
- if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
adapter->npars[j].eswitch_status = true;
else
continue;
@@ -875,15 +904,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
j++;
}
- if (qlcnic_82xx_check(adapter)) {
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- qlcnic_enable_eswitch(adapter, i, 1);
}
kfree(pci_info);
@@ -1138,14 +1168,18 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
- /* Disable NPAR for 83XX */
- if (qlcnic_83xx_check(adapter))
- return err;
-
- if (adapter->ahw->capabilities & BIT_6)
+ if (adapter->ahw->capabilities & BIT_6) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
return err;
}
@@ -1293,6 +1327,8 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
"HAL Version: %d, Privileged function\n",
adapter->ahw->fw_hal_version);
}
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
}
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
@@ -1552,7 +1588,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- num_sds_rings = adapter->max_sds_rings;
+ num_sds_rings = adapter->drv_sds_rings;
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
@@ -1586,7 +1622,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
(adapter->flags & QLCNIC_MSIX_ENABLED) &&
!(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
snprintf(tx_ring->name, sizeof(tx_ring->name),
@@ -1614,7 +1650,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
free_irq(sds_ring->irq, sds_ring);
}
@@ -1623,7 +1659,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
!(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
(qlcnic_82xx_check(adapter) &&
qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
+ for (ring = 0; ring < adapter->drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring->irq)
@@ -1677,7 +1713,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->ahw->linkup = 0;
- if (adapter->max_sds_rings > 1)
+ if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
qlcnic_config_intr_coalesce(adapter);
@@ -1719,6 +1755,7 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1737,8 +1774,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_reset_rx_buffers_list(adapter);
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+ spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1814,16 +1852,16 @@ void qlcnic_detach(struct qlcnic_adapter *adapter)
adapter->is_up = 0;
}
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int max_tx_rings = adapter->max_drv_tx_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
@@ -1834,8 +1872,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
qlcnic_detach(adapter);
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
- adapter->max_drv_tx_rings = max_tx_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1901,10 +1939,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
- adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1925,7 +1963,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
@@ -2072,7 +2110,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
return err;
}
- qlcnic_dcb_init_dcbnl_ops(adapter);
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
return 0;
}
@@ -2098,7 +2136,7 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_tx_ring *tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
vfree(tx_ring->cmd_buf_arr);
@@ -2116,14 +2154,14 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *cmd_buf_arr;
- tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ tx_ring = kcalloc(adapter->drv_tx_rings,
sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
if (tx_ring == NULL)
return -ENOMEM;
adapter->tx_ring = tx_ring;
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, ring);
@@ -2138,11 +2176,11 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
if (qlcnic_83xx_check(adapter) ||
(qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
- for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- index = adapter->max_sds_rings + ring;
+ index = adapter->drv_sds_rings + ring;
vector = adapter->msix_entries[index].vector;
tx_ring->irq = vector;
}
@@ -2166,17 +2204,6 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
- return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
- kfree(adapter->dcb);
- adapter->dcb = NULL;
-}
-
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2185,6 +2212,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ struct qlcnic_dcb *dcb;
if (pdev->is_virtfn)
return -ENODEV;
@@ -2271,6 +2299,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
+ spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
@@ -2285,38 +2314,51 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_maintenance_mode;
}
- qlcnic_get_multiq_capability(adapter);
-
- if ((adapter->ahw->act_pci_func > 2) &&
- qlcnic_check_multi_tx(adapter)) {
- adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
- dev_info(&adapter->pdev->dev,
- "vNIC mode enabled, Set max TX rings = %d\n",
- adapter->max_drv_tx_rings);
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
}
- if (!qlcnic_check_multi_tx(adapter)) {
- clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
- adapter->max_drv_tx_rings = 1;
- }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
} else if (qlcnic_83xx_check(adapter)) {
- adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
if (err) {
- dev_err(&pdev->dev, "%s: failed\n", __func__);
- goto err_out_free_hw;
+ switch (err) {
+ case -ENOTRECOVERABLE:
+				dev_err(&pdev->dev, "Adapter initialization failed due to faulty hardware. Please reboot\n");
+				dev_err(&pdev->dev, "If a reboot doesn't help, please replace the adapter with a new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
+				dev_err(&pdev->dev, "If a reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ goto err_out_maintenance_mode;
+ }
}
+
if (qlcnic_sriov_vf_check(adapter))
return 0;
} else {
@@ -2344,7 +2386,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -2414,13 +2456,20 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
err = register_netdev(netdev);
if (err) {
@@ -2451,7 +2500,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
- qlcnic_dcb_free(adapter);
+ qlcnic_dcb_free(adapter->dcb);
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
@@ -2490,7 +2539,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
if (adapter->qlcnic_wq) {
destroy_workqueue(adapter->qlcnic_wq);
@@ -2543,12 +2591,11 @@ static int qlcnic_resume(struct pci_dev *pdev)
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state;
int err;
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
return -EIO;
}
@@ -2710,24 +2757,21 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- if (qlcnic_82xx_check(adapter)) {
- for (ring = 0; ring < adapter->max_drv_tx_rings;
- ring++) {
- tx_ring = &adapter->tx_ring[ring];
- dev_info(&netdev->dev, "ring=%d\n", ring);
- dev_info(&netdev->dev, "crb_intr_mask=%d\n",
- readl(tx_ring->crb_intr_mask));
- dev_info(&netdev->dev, "producer=%d\n",
- readl(tx_ring->crb_cmd_producer));
- dev_info(&netdev->dev, "sw_consumer = %d\n",
- tx_ring->sw_consumer);
- dev_info(&netdev->dev, "hw_consumer = %d\n",
- le32_to_cpu(*(tx_ring->hw_consumer)));
- dev_info(&netdev->dev, "xmit-on=%llu\n",
- tx_ring->xmit_on);
- dev_info(&netdev->dev, "xmit-off=%llu\n",
- tx_ring->xmit_off);
- }
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d\n", ring);
+ netdev_info(netdev,
+ "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_intr_mask),
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
}
adapter->ahw->reset_context = 1;
}
@@ -2841,7 +2885,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_intr(adapter->irq, sds_ring);
}
@@ -3261,8 +3305,9 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
return;
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
- netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
__func__);
qlcnic_api_unlock(adapter);
@@ -3329,7 +3374,7 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
@@ -3354,6 +3399,8 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
u32 state = 0, heartbeat;
u32 peg_status;
int err = 0;
@@ -3378,7 +3425,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
qlcnic_reset_hw_context(adapter);
return 0;
@@ -3421,6 +3468,9 @@ detach:
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ qlcnic_dump_fw(adapter);
}
return 1;
@@ -3502,7 +3552,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0, 0);
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
@@ -3647,130 +3697,90 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw = QLCNIC_MAX_TX_RINGS;
- u32 max_allowed;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
- return -EINVAL;
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ max_hw_rings = adapter->max_tx_rings;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
}
- if (!qlcnic_check_multi_tx(adapter)) {
- netdev_err(netdev, "No Multi TX-Q support\n");
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
- if (txq > QLCNIC_MAX_TX_RINGS) {
- netdev_err(netdev, "Invalid ring count\n");
+ if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((txq > max_allowed) || !is_power_of_2(txq)) {
- if (!is_power_of_2(txq))
- netdev_err(netdev,
- "TX queue should be a power of 2\n");
- if (txq > num_online_cpus())
- netdev_err(netdev,
- "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
- num_online_cpus());
- netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ if (ring_cnt < 2) {
+ netdev_err(netdev,
+ "%s rings value should not be lower than 2\n", buf);
return -EINVAL;
}
- return 0;
-}
-
-int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
-{
- struct net_device *netdev = adapter->netdev;
- u8 max_hw = adapter->ahw->max_rx_ques;
- u32 max_allowed;
-
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
return -EINVAL;
}
- if (val > QLCNIC_MAX_SDS_RINGS) {
- netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLCNIC_MAX_SDS_RINGS);
- return -EINVAL;
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
}
- max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
- num_online_cpus()));
- if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
- if (!is_power_of_2(val))
- netdev_err(netdev, "RSS value should be a power of 2\n");
-
- if (val < 2)
- netdev_err(netdev, "RSS value should not be lower than 2\n");
-
- if (val > max_hw)
- netdev_err(netdev,
- "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
- max_hw);
-
- if (val > num_online_cpus())
- netdev_err(netdev,
- "RSS value should not be higher than[%u], number of online CPUs in the system\n",
- num_online_cpus());
-
- netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
-
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+			   "%s rings value should not be higher than %u, the number of online CPUs\n",
+ buf, num_online_cpus());
return -EINVAL;
}
+
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
{
- int err;
struct net_device *netdev = adapter->netdev;
- int num_msix;
+ int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
- if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
- !qlcnic_use_msi) {
- netdev_err(netdev, "No RSS support in INT-x mode\n");
- return -EINVAL;
- }
-
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
- if (qlcnic_82xx_check(adapter)) {
- if (txq != 0)
- adapter->max_drv_tx_rings = txq;
-
- if (qlcnic_check_multi_tx(adapter) &&
- (txq > adapter->max_drv_tx_rings))
- num_msix = adapter->max_drv_tx_rings;
- else
- num_msix = data;
- }
-
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
- netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
-
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data, txq);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+
+ netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 15513608d480..7763962e2ec4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
}
if (ops_index == ops_cnt) {
- dev_info(&adapter->pdev->dev,
- "Invalid entry type %d, exiting dump\n",
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
entry->hdr.type);
- goto error;
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
}
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
}
- if (dump_size != buf_offset) {
- dev_info(&adapter->pdev->dev,
- "Captured(%d) and expected size(%d) do not match\n",
- buf_offset, dump_size);
- goto error;
- } else {
- fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
- adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
- adapter->netdev->name, fw_dump->size);
- /* Send a udev event to notify availability of FW dump */
- kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
- return 0;
- }
-error:
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+ adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
if (fw_dump->use_pex_dma)
dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
fw_dump->dma_buffer, fw_dump->phys_addr);
- vfree(fw_dump->data);
- return -EINVAL;
+
+ return 0;
}
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 392b9bd12b4f..21a4b274d2e4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
int pci_using_dac)
{
+ struct qlcnic_dcb *dcb;
int err;
INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -507,7 +508,11 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1, 0);
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -533,8 +538,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- if (adapter->dcb && qlcnic_dcb_attach(adapter))
- qlcnic_clear_dcb_ops(adapter);
+ dcb = adapter->dcb;
+
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
@@ -1577,7 +1584,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
- qlcnic_dcb_get_info(adapter);
+ qlcnic_dcb_get_info(adapter->dcb);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 019f4377307f..1a9f8a400e50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -156,7 +156,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- int err, max_sds_rings = adapter->max_sds_rings;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
u8 h_beacon_state, b_state, b_rate;
@@ -211,7 +211,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
out:
if (!ahw->beacon_state)
@@ -1272,7 +1272,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -1286,8 +1285,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
@@ -1313,7 +1311,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state;
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -1323,8 +1320,7 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 899433778466..0c9c4e895595 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.00.00.33"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_OB_DUMP */
#ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
#endif
#ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#endif
#ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 2553cf4503b9..a245dc18d769 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
}
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
+ if (tags[6] == ETH_P_8021Q &&
+ tags[8] == ETH_P_8021Q)
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
goto err_out;
}
+	/* Update the MAC header length */
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * to enable/disable hardware VLAN acceleration based on the requested features
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+
+	/* Update the features with the recent change */
+ ndev->features = features;
+
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ return status;
+}
+
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
+ int err;
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
+
return features;
}
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->doorbell_area);
vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index e9dc84943cfc..1e49ec5b2232 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1231,7 +1231,6 @@ err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d2e591955bdd..f2a2128165dd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb8aba0..50a92104dd0a 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
pci_release_regions (pdev);
free_netdev(dev);
- pci_set_drvdata (pdev, NULL);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3397cee89777..799387570766 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
rtl_disable_msi(pdev, tp);
rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops rtl_netdev_ops = {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b57c278d3b46..d256ce19d4de 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -689,7 +689,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = 0x00000001,
+ .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
@@ -872,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, 6);
+ memcpy(ndev->dev_addr, mac, ETH_ALEN);
} else {
ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
@@ -2663,6 +2663,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ if (!pd) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto out_release;
+ }
+
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a0db02c63b11..f32c1692d310 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -321,6 +321,9 @@ enum TD_STS_BIT {
#define TD_TFP (TD_TFP1|TD_TFP0)
/* RMCR */
+enum RMCR_BIT {
+ RMCR_RNC = 0x00000001,
+};
#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21f9ad6392e9..676c3c057bfb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
return rc;
}
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+ unsigned int i;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+ for (i = 0; i < nic_data->n_piobufs; i++) {
+ MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[i]);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc);
+ }
+
+ nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+ unsigned int i;
+ size_t outlen;
+ int rc = 0;
+
+ BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+ for (i = 0; i < n; i++) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ break;
+ if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = -EIO;
+ break;
+ }
+ nic_data->piobuf_handle[i] =
+ MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated PIO buffer %u handle %x\n", i,
+ nic_data->piobuf_handle[i]);
+ }
+
+ nic_data->n_piobufs = i;
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned int offset, index;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+ /* Link a buffer to each VI in the write-combining mapping */
+ for (index = 0; index < nic_data->n_piobufs; ++index) {
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ nic_data->pio_write_vi_base + index, index,
+ rc);
+ goto fail;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u\n",
+ nic_data->pio_write_vi_base + index, index);
+ }
+
+ /* Link a buffer to each TX queue */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ /* We assign the PIO buffers to queues in
+ * reverse order to allow for the following
+ * special case.
+ */
+ offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+ tx_queue->channel->channel - 1) *
+ efx_piobuf_size);
+ index = offset / ER_DZ_TX_PIOBUF_SIZE;
+ offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+ /* When the host page size is 4K, the first
+ * host page in the WC mapping may be within
+ * the same VI page as the last TX queue. We
+ * can only link one buffer to each VI.
+ */
+ if (tx_queue->queue == nic_data->pio_write_vi_base) {
+ BUG_ON(index != 0);
+ rc = 0;
+ } else {
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_TXQ_INSTANCE,
+ tx_queue->queue);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+
+ if (rc) {
+ /* This is non-fatal; the TX path just
+ * won't use PIO for this queue
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ tx_queue->queue, index, rc);
+ tx_queue->piobuf = NULL;
+ } else {
+ tx_queue->piobuf =
+ nic_data->pio_write_base +
+ index * EFX_VI_PAGE_SIZE + offset;
+ tx_queue->piobuf_offset = offset;
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u offset %x addr %p\n",
+ tx_queue->queue, index,
+ tx_queue->piobuf_offset,
+ tx_queue->piobuf);
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ while (index--) {
+ MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+ inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+ return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
static void efx_ef10_remove(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
/* This needs to be after efx_ptp_remove_channel() with no filters */
efx_ef10_rx_free_indir_table(efx);
+ if (nic_data->wc_membase)
+ iounmap(nic_data->wc_membase);
+
rc = efx_ef10_free_vis(efx);
WARN_ON(rc != 0);
+ if (!nic_data->must_restore_piobufs)
+ efx_ef10_free_piobufs(efx);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
return 0;
}
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
- unsigned int n_vis =
- max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int uc_mem_map_size, wc_mem_map_size;
+ unsigned int min_vis, pio_write_vi_base, max_vis;
+ void __iomem *membase;
+ int rc;
+
+ min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+ /* Try to allocate PIO buffers if wanted and if the full
+ * number of PIO buffers would be sufficient to allocate one
+ * copy-buffer per TX channel. Failure is non-fatal, as there
+ * are only a small number of PIO buffers shared between all
+ * functions of the controller.
+ */
+ if (efx_piobuf_size != 0 &&
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ efx->n_tx_channels) {
+ unsigned int n_piobufs =
+ DIV_ROUND_UP(efx->n_tx_channels,
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+ rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "failed to allocate PIO buffers (%d)\n", rc);
+ else
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated %u PIO buffers\n", n_piobufs);
+ }
+#else
+ nic_data->n_piobufs = 0;
+#endif
- return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+ /* PIO buffers should be mapped with write-combining enabled,
+ * and we want to make single UC and WC mappings rather than
+ * several of each (in fact that's the only option if host
+ * page size is >4K). So we may allocate some extra VIs just
+ * for writing PIO buffers through.
+ */
+ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ ER_DZ_TX_PIOBUF);
+ if (nic_data->n_piobufs) {
+ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+ nic_data->n_piobufs) *
+ EFX_VI_PAGE_SIZE) -
+ uc_mem_map_size);
+ max_vis = pio_write_vi_base + nic_data->n_piobufs;
+ } else {
+ pio_write_vi_base = 0;
+ wc_mem_map_size = 0;
+ max_vis = min_vis;
+ }
+
+ /* In case the last attached driver failed to free VIs, do it now */
+ rc = efx_ef10_free_vis(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+ if (rc != 0)
+ return rc;
+
+ /* If we didn't get enough VIs to map all the PIO buffers, free the
+ * PIO buffers
+ */
+ if (nic_data->n_piobufs &&
+ nic_data->n_allocated_vis <
+ pio_write_vi_base + nic_data->n_piobufs) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%u VIs are not sufficient to map %u PIO buffers\n",
+ nic_data->n_allocated_vis, nic_data->n_piobufs);
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Shrink the original UC mapping of the memory BAR */
+ membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not shrink memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+
+ /* Set up the WC mapping if needed */
+ if (wc_mem_map_size) {
+ nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+ uc_mem_map_size,
+ wc_mem_map_size);
+ if (!nic_data->wc_membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not allocate WC mapping of size %x\n",
+ wc_mem_map_size);
+ return -ENOMEM;
+ }
+ nic_data->pio_write_vi_base = pio_write_vi_base;
+ nic_data->pio_write_base =
+ nic_data->wc_membase +
+ (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ uc_mem_map_size);
+
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+ &efx->membase_phys, efx->membase, uc_mem_map_size,
+ nic_data->wc_membase, wc_mem_map_size);
+
+ return 0;
}
static int efx_ef10_init_nic(struct efx_nic *efx)
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_realloc_vis = false;
}
+ if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+ rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+ if (rc == 0) {
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Log an error on failure, but this is non-fatal */
+ if (rc)
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PIO buffers (%d)\n", rc);
+ nic_data->must_restore_piobufs = false;
+ }
+
efx_ef10_rx_push_indir_table(efx);
return 0;
}
@@ -759,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* The datapath firmware might have been changed */
@@ -2180,7 +2491,7 @@ out_unlock:
return rc;
}
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
/* no need to do anything here on EF10 */
}
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index b3f4e3755fd9..207ac9a1e3de 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -315,6 +315,7 @@
#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
#define ESF_DZ_TX_PIO_OPT_LBN 60
#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_PIO 1
#define ESF_DZ_TX_PIO_CONT_LBN 59
#define ESF_DZ_TX_PIO_CONT_WIDTH 1
#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5771fe..b8235ee5d7d7 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -18,37 +18,36 @@
#define EFX_MEM_BAR 2
/* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
/* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
- unsigned int index, unsigned int n_frags,
- unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
/* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
/* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
- unsigned int rx_usecs, bool rx_adaptive,
- bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
- unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
/* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
- size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
{
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf5c323..1f529fa2edb1 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
return 0;
}
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96ce507d8602..4d3f119b67b3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,6 +66,11 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
unsigned int reg)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index c34d0d4e10ee..656a3277c2b2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
}
#endif
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual);
-
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
efx_dword_t *outbuf,
size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
-extern void efx_mcdi_process_event(struct efx_channel *channel,
- efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
/* We expect that 16- and 32-bit fields in MCDI requests and responses
* are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
- u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
- u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
- size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
- const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
#else
static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index 16824fecc5ee..4a2dc4c281b7 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -20,7 +20,7 @@
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
return sync;
}
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
*
* This function will sleep
*/
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
- int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support though PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
/* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
- int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+ unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
u8 efx_mdio_get_pause(struct efx_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
- unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
}
/* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b172ed133055..b14a717ac3e8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
+ unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
@@ -182,6 +185,9 @@ struct efx_tx_buffer {
* @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -209,6 +215,7 @@ struct efx_tx_buffer {
* blocks
* @tso_packets: Number of packets via the TSO xmit path
* @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +230,8 @@ struct efx_tx_queue {
struct efx_buffer *tsoh_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ void __iomem *piobuf;
+ unsigned int piobuf_offset;
bool initialised;
/* Members used mainly on the completion path */
@@ -238,6 +247,7 @@ struct efx_tx_queue {
unsigned int tso_long_headers;
unsigned int tso_packets;
unsigned int pushes;
+ unsigned int pio_packets;
/* Members shared between paths and sometimes updated */
unsigned int empty_read_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9826594c8a48..9c90bf56090f 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -19,6 +19,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
+#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Register dump */
-#define REGISTER_REVISION_A 1
-#define REGISTER_REVISION_B 2
-#define REGISTER_REVISION_C 3
-#define REGISTER_REVISION_Z 3 /* latest revision */
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct efx_nic_reg {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
};
-#define REGISTER(name, min_rev, max_rev) { \
- FR_ ## min_rev ## max_rev ## _ ## name, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
}
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
+ REGISTER_DZ(BIU_HW_REV_ID),
+ REGISTER_DZ(MC_DB_LWRD),
+ REGISTER_DZ(MC_DB_HWRD),
};
struct efx_nic_reg_table {
u32 offset:24;
- u32 min_revision:2, max_revision:2;
+ u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
- REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
-#define REGISTER_TABLE(name, min_rev, max_rev) \
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
- name, FR_ ## min_rev ## max_rev ## _ ## name, \
- min_rev, max_rev, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
- FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
- A, A, 8, 1024),
+ F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
- B, Z, 8, 1024),
+ F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+ REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
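For readability, here is how the reworked token pasting expands for one new EF10 entry and one pre-existing Falcon entry. This is a worked illustration only, not code added by the patch; it simply follows the ER_DZ_*/FR_* naming already used in the table above.

/*
 * REGISTER_DZ(BIU_HW_REV_ID)
 *   => REGISTER(BIU_HW_REV_ID, E, D, Z)
 *   => { ER_DZ_BIU_HW_REV_ID,
 *        REGISTER_REVISION_ED,  i.e. 4,
 *        REGISTER_REVISION_EZ,  i.e. 4 }
 *
 * REGISTER_AZ(ADR_REGION)
 *   => REGISTER(ADR_REGION, F, A, Z)
 *   => { FR_AZ_ADR_REGION,
 *        REGISTER_REVISION_FA,  i.e. 1,
 *        REGISTER_REVISION_FZ,  i.e. 3 }
 */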
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 890bbbe8320e..11b6112d9249 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push. May return a false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+ return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
+ bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
+ return was_empty && tx_queue->write_count - write_count == 1;
}
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
@@ -401,6 +417,12 @@ enum {
EF10_STAT_COUNT
};
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
/**
* struct efx_ef10_nic_data - EF10 architecture NIC state
* @mcdi_buf: DMA buffer for MCDI
@@ -409,6 +431,13 @@ enum {
* @n_allocated_vis: Number of VIs allocated to this function
* @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ * reboot
* @rx_rss_context: Firmware handle for our RSS context
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -424,6 +453,11 @@ struct efx_ef10_nic_data {
unsigned int n_allocated_vis;
bool must_realloc_vis;
bool must_restore_filters;
+ unsigned int n_piobufs;
+ void __iomem *wc_membase, *pio_write_base;
+ unsigned int pio_write_vi_base;
+ unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ bool must_restore_piobufs;
u32 rx_rss_context;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
@@ -475,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
return 1 << efx->vi_scale;
}
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
#else
@@ -512,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
#endif
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -541,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
**************************************************************************
*/
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -609,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 filter_id,
+ struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 *buf,
+ u32 size);
#ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
#endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -681,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
}
/* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
/* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -703,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
}
/* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
efx->type->rx_push_indir_table(efx);
}
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
@@ -728,24 +760,22 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_farch_test_registers(struct efx_nic *efx,
- const struct efx_farch_register_test *regs,
- size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask,
- u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
#define EFX_MAX_FLUSH_TIME 5000
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
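A quick worked illustration of the __efx_nic_tx_is_empty() test introduced above (explanatory only, not part of the patch): the completion path records empty_read_count as read_count with EFX_EMPTY_COUNT_VALID or'ed in when it drains the queue, so XOR-ing that against a write_count and masking off the VALID bit is zero exactly when nothing has been queued since that observation.

/* Explanatory sketch only. Suppose the queue drained when
 * read_count == write_count == 10:
 *
 *   empty_read_count = 10 | EFX_EMPTY_COUNT_VALID
 *
 *   write_count == 10: (empty_read_count ^ 10) == EFX_EMPTY_COUNT_VALID,
 *                      masked with ~EFX_EMPTY_COUNT_VALID gives 0, so empty
 *   write_count == 11: low bits differ, non-zero, so not (known to be) empty
 *
 * If the completion path has not yet recorded the empty state,
 * empty_read_count is 0 and the check reports "not empty": the
 * documented false negative.
 */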
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 45eeb7075156..803bf445c08e 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -15,7 +15,7 @@
*/
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a596725023f..8f09e686fc23 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -12,6 +12,7 @@
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const struct iphdr *ip;
const __be16 *ports;
+ __be16 ether_type;
int nhoff;
int rc;
- nhoff = skb_network_offset(skb);
+ /* The core RPS/RFS code has already parsed and validated
+ * VLAN, IP and transport headers. We assume they are in the
+ * header area.
+ */
if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)skb->data + nhoff)->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
+ const struct vlan_hdr *vh =
+ (const struct vlan_hdr *)skb->data;
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
+ /* We can't filter on the IP 5-tuple and the vlan
+ * together, so just strip the vlan header and filter
+ * on the IP part.
*/
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+ ether_type = vh->h_vlan_encapsulated_proto;
+ nhoff = sizeof(struct vlan_hdr);
+ } else {
+ ether_type = skb->protocol;
+ nhoff = 0;
}
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
+ if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
+ spec.match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec.ether_type = ether_type;
+
+ if (ether_type == htons(ETH_P_IP)) {
+ const struct iphdr *ip =
+ (const struct iphdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ spec.ip_proto = ip->protocol;
+ spec.rem_host[0] = ip->saddr;
+ spec.loc_host[0] = ip->daddr;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ } else {
+ const struct ipv6hdr *ip6 =
+ (const struct ipv6hdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(*ip6) + 4);
+ spec.ip_proto = ip6->nexthdr;
+ memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+ memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+ ports = (const __be16 *)(ip6 + 1);
+ }
+
+ spec.rem_port = ports[0];
+ spec.loc_port = ports[1];
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
+ if (ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
return rc;
}
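A side note on the header parsing above (illustrative only, not part of the patch): the transport ports are read straight after the network header, which relies on RFS only handing over packets without IPv6 extension headers. In outline, with data being skb->data and nhoff the offset of the IP header within it, the offsets work out as in this sketch (the helper name is made up):

static const __be16 *example_l4_ports(const u8 *data, __be16 ether_type,
				      unsigned int nhoff)
{
	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip = (const struct iphdr *)(data + nhoff);

		/* IPv4 header length is variable: ihl words of 4 bytes */
		return (const __be16 *)(data + nhoff + 4 * ip->ihl);
	}

	/* IPv6 header is a fixed 40 bytes; no extension headers expected */
	return (const __be16 *)((const struct ipv6hdr *)(data + nhoff) + 1);
}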
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index 87698ae0bf75..a2f4a06ffa4e 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -43,13 +43,12 @@ struct efx_self_tests {
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
- const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
- struct efx_self_tests *tests,
- unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2ac91c5b5eea..c49d1fb16965 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -17,10 +17,46 @@
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
+#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
+#include "io.h"
#include "nic.h"
#include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
@@ -29,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
- buffer->unmap_len);
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
@@ -83,8 +118,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
- /* Possibly one more per segment for the alignment workaround */
- if (EFX_WORKAROUND_5391(efx))
+ /* Possibly one more per segment for the alignment workaround,
+ * or for option descriptors
+ */
+ if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +182,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
}
}
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+ int used;
+ u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+ memcpy_toio(*piobuf, data, block_len);
+ *piobuf += block_len;
+ len -= block_len;
+
+ if (len) {
+ data += block_len;
+ BUG_ON(copy_buf->used);
+ BUG_ON(len > sizeof(copy_buf->buf));
+ memcpy(copy_buf->buf, data, len);
+ copy_buf->used = len;
+ }
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from the copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ if (copy_buf->used) {
+ /* if the copy buffer is partially full, fill it up and write */
+ int copy_to_buf =
+ min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+ memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+ copy_buf->used += copy_to_buf;
+
+ /* if we didn't fill it up then we're done for now */
+ if (copy_buf->used < sizeof(copy_buf->buf))
+ return;
+
+ memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ *piobuf += sizeof(copy_buf->buf);
+ data += copy_to_buf;
+ len -= copy_to_buf;
+ copy_buf->used = 0;
+ }
+
+ efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ /* if there's anything in it, write the whole buffer, including junk */
+ if (copy_buf->used)
+ memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse the skb structure and copy fragments into the PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+ u8 __iomem **piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int i;
+
+ efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+ copy_buf);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ skb_frag_size(f), copy_buf);
+ kunmap_atomic(vaddr);
+ }
+
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ u8 __iomem *piobuf = tx_queue->piobuf;
+
+ /* Copy to PIO buffer. Ensure the writes are padded to the end
+ * of a cache line, as this is required for write-combining to be
+ * effective on at least x86.
+ */
+
+ if (skb_shinfo(skb)->nr_frags) {
+ /* The size of the copy buffer will ensure all writes
+ * are the size of a cache line.
+ */
+ struct efx_short_copy_buffer copy_buf;
+
+ copy_buf.used = 0;
+
+ efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+ &piobuf, &copy_buf);
+ efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+ } else {
+ /* Pad the write to the size of a cache line.
+ * We can do this because we know the skb_shared_info struct is
+ * after the source, and the destination buffer is big enough.
+ */
+ BUILD_BUG_ON(L1_CACHE_BYTES >
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ memcpy_toio(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
+ }
+
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+ ESF_DZ_TX_PIO_BUF_ADDR,
+ tx_queue->piobuf_offset);
+ ++tx_queue->pio_packets;
+ ++tx_queue->insert_count;
+ return buffer;
+}
+#endif /* EFX_USE_PIO */
+
/*
* Add a socket buffer to a TX queue
*
@@ -167,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- unsigned int len, unmap_len = 0, insert_ptr;
+ unsigned int len, unmap_len = 0;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
unsigned short dma_flags;
@@ -189,6 +365,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+ if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+ efx_nic_tx_is_empty(tx_queue) &&
+ efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+ buffer = efx_enqueue_skb_pio(tx_queue, skb);
+ dma_flags = EFX_TX_BUF_OPTION;
+ goto finish_packet;
+ }
+#endif
+
/* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
@@ -208,11 +395,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Add to TX queue, splitting across DMA boundaries */
do {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
- EFX_BUG_ON_PARANOID(buffer->flags);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
dma_len = efx_max_tx_len(efx, dma_addr);
if (likely(dma_len >= len))
@@ -230,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Transfer ownership of the unmapping to the final buffer */
buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
unmap_len = 0;
/* Get address and size of next fragment */
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+finish_packet:
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
while (tx_queue->insert_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
}
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* @tcp_off: Offset of TCP header
* @header_len: Number of bytes of header
* @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ * descriptors
*
* The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
/* Output position */
unsigned out_len;
unsigned seqnum;
- unsigned ipv4_id;
+ u16 ipv4_id;
unsigned packet_space;
/* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
unsigned int tcp_off;
unsigned header_len;
unsigned int ip_base_len;
+ dma_addr_t header_dma_addr;
+ unsigned int header_unmap_len;
};
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
{
struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx;
- unsigned dma_len, insert_ptr;
+ unsigned dma_len;
EFX_BUG_ON_PARANOID(len <= 0);
while (1) {
- insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[insert_ptr];
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
++tx_queue->insert_count;
EFX_BUG_ON_PARANOID(tx_queue->insert_count -
tx_queue->read_count >=
efx->txq_entries);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
- EFX_BUG_ON_PARANOID(buffer->flags);
-
buffer->dma_addr = dma_addr;
dma_len = efx_max_tx_len(efx, dma_addr);
@@ -796,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
return -ENOMEM;
}
buffer->unmap_len = buffer->len;
+ buffer->dma_offset = 0;
buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
}
@@ -814,19 +999,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
- buffer = &tx_queue->buffer[tx_queue->insert_count &
- tx_queue->ptr_mask];
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
/* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
{
+ bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int header_len, in_len;
+ dma_addr_t dma_addr;
+
st->ip_off = skb_network_header(skb) - skb->data;
st->tcp_off = skb_transport_header(skb) - skb->data;
- st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ in_len = skb_headlen(skb) - header_len;
+ st->header_len = header_len;
+ st->in_len = in_len;
if (st->protocol == htons(ETH_P_IP)) {
st->ip_base_len = st->header_len - st->ip_off;
st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1033,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->out_len = skb->len - st->header_len;
- st->unmap_len = 0;
- st->dma_flags = 0;
+ st->out_len = skb->len - header_len;
+
+ if (!use_options) {
+ st->header_unmap_len = 0;
+
+ if (likely(in_len == 0)) {
+ st->dma_flags = 0;
+ st->unmap_len = 0;
+ return 0;
+ }
+
+ dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+ in_len, DMA_TO_DEVICE);
+ st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ st->dma_addr = dma_addr;
+ st->unmap_addr = dma_addr;
+ st->unmap_len = in_len;
+ } else {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ st->header_dma_addr = dma_addr;
+ st->header_unmap_len = skb_headlen(skb);
+ st->dma_flags = 0;
+ st->dma_addr = dma_addr + header_len;
+ st->unmap_len = 0;
+ }
+
+ return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1078,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
return -ENOMEM;
}
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
- const struct sk_buff *skb)
-{
- int hl = st->header_len;
- int len = skb_headlen(skb) - hl;
-
- st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
- len, DMA_TO_DEVICE);
- if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
- st->unmap_len = len;
- st->in_len = len;
- st->dma_addr = st->unmap_addr;
- return 0;
- }
- return -ENOMEM;
-}
-
/**
* tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -922,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
if (st->in_len == 0) {
/* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
+ buffer->dma_offset = buffer->unmap_len - buffer->len;
buffer->flags |= st->dma_flags;
st->unmap_len = 0;
}
@@ -944,55 +1145,98 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
struct tso_state *st)
{
struct efx_tx_buffer *buffer =
- &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
- struct tcphdr *tsoh_th;
- unsigned ip_length;
- u8 *header;
- int rc;
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+ u8 tcp_flags_clear;
- /* Allocate and insert a DMA-mapped header buffer. */
- header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
- if (!header)
- return -ENOMEM;
-
- tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
- /* Copy and update the headers. */
- memcpy(header, skb->data, st->header_len);
-
- tsoh_th->seq = htonl(st->seqnum);
- st->seqnum += skb_shinfo(skb)->gso_size;
- if (st->out_len > skb_shinfo(skb)->gso_size) {
- /* This packet will not finish the TSO burst. */
+ if (!is_last) {
st->packet_space = skb_shinfo(skb)->gso_size;
- tsoh_th->fin = 0;
- tsoh_th->psh = 0;
+ tcp_flags_clear = 0x09; /* mask out FIN and PSH */
} else {
- /* This packet will be the last in the TSO burst. */
st->packet_space = st->out_len;
- tsoh_th->fin = tcp_hdr(skb)->fin;
- tsoh_th->psh = tcp_hdr(skb)->psh;
+ tcp_flags_clear = 0x00;
}
- ip_length = st->ip_base_len + st->packet_space;
- if (st->protocol == htons(ETH_P_IP)) {
- struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+ if (!st->header_unmap_len) {
+ /* Allocate and insert a DMA-mapped header buffer. */
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+ int rc;
+
+ header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+ if (!header)
+ return -ENOMEM;
- tsoh_iph->tot_len = htons(ip_length);
+ tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->header_len);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+ ip_length = st->ip_base_len + st->packet_space;
+
+ if (st->protocol == htons(ETH_P_IP)) {
+ struct iphdr *tsoh_iph =
+ (struct iphdr *)(header + st->ip_off);
+
+ tsoh_iph->tot_len = htons(ip_length);
+ tsoh_iph->id = htons(st->ipv4_id);
+ } else {
+ struct ipv6hdr *tsoh_iph =
+ (struct ipv6hdr *)(header + st->ip_off);
+
+ tsoh_iph->payload_len = htons(ip_length);
+ }
- /* Linux leaves suitable gaps in the IP ID space for us to fill. */
- tsoh_iph->id = htons(st->ipv4_id);
- st->ipv4_id++;
+ rc = efx_tso_put_header(tx_queue, buffer, header);
+ if (unlikely(rc))
+ return rc;
} else {
- struct ipv6hdr *tsoh_iph =
- (struct ipv6hdr *)(header + st->ip_off);
+ /* Send the original headers with a TSO option descriptor
+ * in front
+ */
+ u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
- tsoh_iph->payload_len = htons(ip_length);
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+ ++tx_queue->insert_count;
+
+ /* We mapped the headers in tso_start(). Unmap them
+ * when the last segment is completed.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->dma_addr = st->header_dma_addr;
+ buffer->len = st->header_len;
+ if (is_last) {
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+ buffer->unmap_len = st->header_unmap_len;
+ buffer->dma_offset = 0;
+ /* Ensure we only unmap them once in case of a
+ * later DMA mapping error and rollback
+ */
+ st->header_unmap_len = 0;
+ } else {
+ buffer->flags = EFX_TX_BUF_CONT;
+ buffer->unmap_len = 0;
+ }
+ ++tx_queue->insert_count;
}
- rc = efx_tso_put_header(tx_queue, buffer, header);
- if (unlikely(rc))
- return rc;
+ st->seqnum += skb_shinfo(skb)->gso_size;
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ ++st->ipv4_id;
++tx_queue->tso_packets;
@@ -1023,12 +1267,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
- tso_start(&state, skb);
+ rc = tso_start(&state, efx, skb);
+ if (rc)
+ goto mem_err;
- /* Assume that skb header area contains exactly the headers, and
- * all payload is in the frag list.
- */
- if (skb_headlen(skb) == state.header_len) {
+ if (likely(state.in_len == 0)) {
/* Grab the first payload fragment. */
EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
frag_i = 0;
@@ -1037,9 +1280,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
if (rc)
goto mem_err;
} else {
- rc = tso_get_head_fragment(&state, efx, skb);
- if (rc)
- goto mem_err;
+ /* Payload starts in the header area. */
frag_i = -1;
}
@@ -1091,6 +1332,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
state.unmap_len, DMA_TO_DEVICE);
}
+ /* Free the header DMA mapping, if using option descriptors */
+ if (state.header_unmap_len)
+ dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+ state.header_unmap_len, DMA_TO_DEVICE);
+
efx_enqueue_unwind(tx_queue);
return NETDEV_TX_OK;
}
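For readers unfamiliar with the tcp_flags_clear values used above: byte 13 of the TCP header carries the flag bits (FIN = 0x01, PSH = 0x08, ACK = 0x10, ...), so masking with 0x09 clears FIN and PSH on every segment except the last, while the final segment keeps the original flags. A minimal userspace sketch of the same masking, for illustration only — the helper name is hypothetical and not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define TCP_FLAG_FIN 0x01
	#define TCP_FLAG_PSH 0x08

	/* Mirror of the tcp_flags_clear logic in tso_start_new_packet():
	 * intermediate segments must not carry FIN or PSH; the last segment
	 * keeps whatever the original header had.
	 */
	static uint8_t tso_segment_flags(uint8_t orig_flags, int is_last)
	{
		uint8_t clear = is_last ? 0x00 : (TCP_FLAG_FIN | TCP_FLAG_PSH);

		return orig_flags & ~clear;
	}

	int main(void)
	{
		uint8_t flags = 0x19;	/* ACK | PSH | FIN on the original header */

		printf("intermediate segment: 0x%02x\n", tso_segment_flags(flags, 0)); /* 0x10 */
		printf("last segment:         0x%02x\n", tso_segment_flags(flags, 1)); /* 0x19 */
		return 0;
	}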
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 770036bc2d87..513ed8b1ba58 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, 6);
+ memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
priv = netdev_priv(dev);
spin_lock_init(&priv->meth_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index ee18e6f7b4fe..acbbe48a519c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->phy_task);
unregister_netdev(dev);
sis190_release_board(pdev);
- pci_set_drvdata(pdev, NULL);
}
static struct pci_driver sis190_pci_driver = {
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 068fc44d37e1..753630f5d3d3 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
- BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+ BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64)
+ MN10300 || COLDFIRE || ARM64 || XTENSA)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 03b256af7ed5..8ae1f8a7bf38 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -91,9 +91,9 @@ static int rx_copybreak;
/* These identify the driver base version and may not be removed. */
static char version[] =
-DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
-" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
@@ -332,9 +332,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(KERN_INFO "%s%s", version, version2);
+ pr_info_once("%s%s\n", version, version2);
#endif
card_idx++;
@@ -423,9 +421,9 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
if (debug > 2) {
- dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
+ dev_dbg(&pdev->dev, "EEPROM contents:\n");
for (i = 0; i < 64; i++)
- printk(" %4.4x%s", read_eeprom(ep, i),
+ pr_cont(" %4.4x%s", read_eeprom(ep, i),
i % 16 == 15 ? "\n" : "");
}
@@ -490,10 +488,10 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err_out_unmap_rx;
- printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
- dev->name, pci_id_tbl[chip_idx].name,
- (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
+ pci_id_tbl[chip_idx].name,
+ (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
+ dev->dev_addr);
out:
return ret;
@@ -703,9 +701,8 @@ static int epic_open(struct net_device *dev)
mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
if (dev->if_port == 1) {
if (debug > 1)
- printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
- "status %4.4x.\n",
- dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+ netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
+ mdio_read(dev, ep->phys[0], MII_BMSR));
}
} else {
int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
@@ -715,10 +712,10 @@ static int epic_open(struct net_device *dev)
else if (! (mii_lpa & LPA_LPACK))
mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
if (debug > 1)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
- " register read of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half",
- ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
+ ep->mii.full_duplex ? "full"
+ : "half",
+ ep->phys[0], mii_lpa);
}
}
@@ -738,10 +735,9 @@ static int epic_open(struct net_device *dev)
TxUnderrun);
if (debug > 1) {
- printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
- "status %4.4x %s-duplex.\n",
- dev->name, ioaddr, irq, er32(GENCTL),
- ep->mii.full_duplex ? "full" : "half");
+ netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
+ ioaddr, irq, er32(GENCTL),
+ ep->mii.full_duplex ? "full" : "half");
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -790,8 +786,8 @@ static void epic_restart(struct net_device *dev)
/* Soft reset the chip. */
ew32(GENCTL, 0x4001);
- printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
- dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
udelay(1);
/* This magic is documented in SMSC app note 7.15 */
@@ -827,9 +823,8 @@ static void epic_restart(struct net_device *dev)
((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
TxUnderrun);
- printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
- " interrupt %4.4x.\n",
- dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
+ netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
+ er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
static void check_media(struct net_device *dev)
@@ -846,9 +841,9 @@ static void check_media(struct net_device *dev)
return;
if (ep->mii.full_duplex != duplex) {
ep->mii.full_duplex = duplex;
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
- " partner capability of %4.4x.\n", dev->name,
- ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
+ ep->mii.full_duplex ? "full" : "half",
+ ep->phys[0], mii_lpa);
ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
}
}
@@ -861,11 +856,10 @@ static void epic_timer(unsigned long data)
int next_tick = 5*HZ;
if (debug > 3) {
- printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
- dev->name, er32(TxSTAT));
- printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
- "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
- er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
+ netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
+ er32(TxSTAT));
+ netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
+ er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
}
check_media(dev);
@@ -880,11 +874,11 @@ static void epic_tx_timeout(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
if (debug > 0) {
- printk(KERN_WARNING "%s: Transmit timeout using MII device, "
- "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
+ netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
+ er16(TxSTAT));
if (debug > 1) {
- printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
- dev->name, ep->dirty_tx, ep->cur_tx);
+ netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
+ ep->dirty_tx, ep->cur_tx);
}
}
if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
@@ -994,9 +988,8 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
ew32(COMMAND, TxQueued);
if (debug > 4)
- printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
- "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
- entry, ctrl_word, er32(TxSTAT));
+ netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
+ skb->len, entry, ctrl_word, er32(TxSTAT));
return NETDEV_TX_OK;
}
@@ -1009,8 +1002,8 @@ static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
#ifndef final_version
/* There was an major error, log it. */
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
+ status);
#endif
stats->tx_errors++;
if (status & 0x1050)
@@ -1057,9 +1050,8 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
#ifndef final_version
if (cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_WARNING
- "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, cur_tx, ep->tx_full);
+ netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, cur_tx, ep->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -1086,8 +1078,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(INTSTAT, status & EpicNormalEvent);
if (debug > 4) {
- printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
- "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
+ netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
+ status, er32(INTSTAT));
}
if ((status & IntrSummary) == 0)
@@ -1125,8 +1117,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
ew32(COMMAND, RestartTx);
}
if (status & PCIBusErr170) {
- printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
- dev->name, status);
+ netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
+ status);
epic_pause(dev);
epic_restart(dev);
}
@@ -1136,8 +1128,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
out:
if (debug > 3) {
- printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
- dev->name, status);
+ netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
+ status);
}
return IRQ_RETVAL(handled);
@@ -1151,7 +1143,7 @@ static int epic_rx(struct net_device *dev, int budget)
int work_done = 0;
if (debug > 4)
- printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
ep->rx_ring[entry].rxstatus);
if (rx_work_limit > budget)
@@ -1162,16 +1154,17 @@ static int epic_rx(struct net_device *dev, int budget)
int status = ep->rx_ring[entry].rxstatus;
if (debug > 4)
- printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ netdev_dbg(dev, " epic_rx() status was %8.8x.\n",
+ status);
if (--rx_work_limit < 0)
break;
if (status & 0x2006) {
if (debug > 2)
- printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
+ status);
if (status & 0x2000) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, status %4.4x!\n", dev->name, status);
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
+ status);
dev->stats.rx_length_errors++;
} else if (status & 0x0006)
/* Rx Frame errors are counted in hardware. */
@@ -1183,9 +1176,8 @@ static int epic_rx(struct net_device *dev, int budget)
struct sk_buff *skb;
if (pkt_len > PKT_BUF_SZ - 4) {
- printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
- "%d bytes.\n",
- dev->name, status, pkt_len);
+ netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
+ status, pkt_len);
pkt_len = 1514;
}
/* Check if the packet is long enough to accept without copying
@@ -1305,8 +1297,8 @@ static int epic_close(struct net_device *dev)
napi_disable(&ep->napi);
if (debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, er32(INTSTAT));
+ netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
+ er32(INTSTAT));
del_timer_sync(&ep->timer);
@@ -1324,7 +1316,7 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].buflength = 0;
if (skb) {
pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1535,7 +1527,6 @@ static void epic_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* pci_power_off(pdev, -1); */
}
@@ -1588,8 +1579,7 @@ static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk (KERN_INFO "%s%s",
- version, version2);
+ pr_info("%s%s\n", version, version2);
#endif
return pci_register_driver(&epic_driver);
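The epic100 changes above are mechanical conversions from raw printk() calls to the netdev_<level>() and pr_<level>() helpers, which prepend the interface or module name themselves. A hedged sketch of the idiom — hypothetical function, kernel build context assumed, not part of the patch:

	#include <linux/netdevice.h>

	/* Illustration only: netdev_info()/netdev_dbg() already print the
	 * driver and interface name, so the old "%s: ...", dev->name pattern
	 * becomes redundant after the conversion.
	 */
	static void example_log_duplex(struct net_device *dev, bool full_duplex)
	{
		netdev_info(dev, "setting %s-duplex\n", full_duplex ? "full" : "half");
		netdev_dbg(dev, "duplex raw value: %d\n", full_duplex);
	}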
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index afe01c4088a3..0f096a890059 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -106,16 +106,16 @@ MODULE_ALIAS("platform:smc911x");
#define POWER_DOWN 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG & (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while (0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
#if SMC_DEBUG_PKTS > 0
@@ -130,21 +130,23 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
#define PRINT_PKT(x...) do { } while (0)
@@ -176,7 +178,7 @@ static void smc911x_reset(struct net_device *dev)
unsigned int reg, timeout=0, resets=1, irq_cfg;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Take out of PM setting first */
if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -188,7 +190,7 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
} while (--timeout && !reg);
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
return;
}
}
@@ -206,14 +208,14 @@ static void smc911x_reset(struct net_device *dev)
reg = SMC_GET_HW_CFG(lp);
/* If chip indicates reset timeout then try again */
if (reg & HW_CFG_SRST_TO_) {
- PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ PRINTK(dev, "chip reset timeout, retrying...\n");
resets++;
break;
}
} while (--timeout && (reg & HW_CFG_SRST_));
}
if (timeout == 0) {
- PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
return;
}
@@ -223,7 +225,7 @@ static void smc911x_reset(struct net_device *dev)
udelay(10);
if (timeout == 0){
- PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
return;
}
@@ -270,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
unsigned mask, cfg, cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -296,7 +298,7 @@ static void smc911x_enable(struct net_device *dev)
/* Turn on receiver and enable RX */
if (cr & MAC_CR_RXEN_)
- DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");
SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
@@ -327,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
unsigned cr;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
/* Disable IRQ's */
SMC_SET_INT_EN(lp, 0);
@@ -346,7 +348,8 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int fifo_count, timeout, reg;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
+ CARDNAME, __func__);
fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
if (fifo_count <= 4) {
/* Manually dump the packet data */
@@ -361,7 +364,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
} while (--timeout && reg);
if (timeout == 0) {
- PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ PRINTK(dev, "timeout waiting for RX fast forward\n");
}
}
}
@@ -379,11 +382,11 @@ static inline void smc911x_rcv(struct net_device *dev)
struct sk_buff *skb;
unsigned char *data;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
+ __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
- dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
+ (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
/* Deal with a bad packet */
@@ -403,8 +406,7 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Alloc a buffer with extra room for DMA alignment */
skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
- PRINTK( "%s: Low memory, rcvd packet dropped.\n",
- dev->name);
+ PRINTK(dev, "Low memory, rcvd packet dropped.\n");
dev->stats.rx_dropped++;
smc911x_drop_pkt(dev);
return;
@@ -422,8 +424,8 @@ static inline void smc911x_rcv(struct net_device *dev)
/* Lower the FIFO threshold if possible */
fifo = SMC_GET_FIFO_INT(lp);
if (fifo & 0xFF) fifo--;
- DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
/* Setup RX DMA */
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
@@ -436,7 +438,7 @@ static inline void smc911x_rcv(struct net_device *dev)
SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
SMC_PULL_DATA(lp, data, pkt_len+2+3);
- DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -456,7 +458,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
unsigned int cmdA, cmdB, len;
unsigned char *buf;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
BUG_ON(lp->pending_tx_skb == NULL);
skb = lp->pending_tx_skb;
@@ -481,12 +483,12 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* tag is packet length so we can use this in stats update later */
cmdB = (skb->len << 16) | (skb->len & 0x7FF);
- DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
- dev->name, len, len, buf, cmdA, cmdB);
+ DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ len, len, buf, cmdA, cmdB);
SMC_SET_TX_FIFO(lp, cmdA);
SMC_SET_TX_FIFO(lp, cmdB);
- DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
PRINT_PKT(buf, len <= 64 ? len : 64);
/* Send pkt via PIO or DMA */
@@ -517,20 +519,20 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int free;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
spin_lock_irqsave(&lp->lock, flags);
BUG_ON(lp->pending_tx_skb != NULL);
free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
- DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
/* Turn off the flow when running out of space in FIFO */
if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
- DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
- dev->name, free);
+ DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
+ free);
/* Reenable when at least 1 packet of size MTU present */
SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
lp->tx_throttle = 1;
@@ -545,8 +547,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* End padding 15 bytes
*/
if (unlikely(free < (skb->len + 8 + 15 + 15))) {
- printk("%s: No Tx free space %d < %d\n",
- dev->name, free, skb->len);
+ netdev_warn(dev, "No Tx free space %d < %d\n",
+ free, skb->len);
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
@@ -561,13 +563,13 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the DMA IRQ starts it
*/
if (lp->txdma_active) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
lp->pending_tx_skb = skb;
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
} else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
lp->txdma_active = 1;
}
}
@@ -589,20 +591,19 @@ static void smc911x_tx(struct net_device *dev)
struct smc911x_local *lp = netdev_priv(dev);
unsigned int tx_status;
- DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
- dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
+ __func__);
/* Collect the TX status */
while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
- DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
- dev->name,
- (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+ DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
+ (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
tx_status = SMC_GET_TX_STS_FIFO(lp);
dev->stats.tx_packets++;
dev->stats.tx_bytes+=tx_status>>16;
- DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
- dev->name, (tx_status & 0xffff0000) >> 16,
- tx_status & 0x0000ffff);
+ DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
+ (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
/* count Tx errors, but ignore lost carrier errors when in
* full-duplex mode */
if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
@@ -640,8 +641,8 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
SMC_GET_MII(lp, phyreg, phyaddr, phydata);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __func__, phyaddr, phyreg, phydata);
return phydata;
}
@@ -654,8 +655,8 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}
@@ -670,7 +671,7 @@ static void smc911x_phy_detect(struct net_device *dev)
int phyaddr;
unsigned int cfg, id1, id2;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
lp->phy_type = 0;
@@ -731,8 +732,8 @@ static void smc911x_phy_detect(struct net_device *dev)
lp->phy_type = id1 << 16 | id2;
}
- DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
- dev->name, id1, id2, lp->mii.phy_id);
+ DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+ id1, id2, lp->mii.phy_id);
}
/*
@@ -745,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* Enter Link Disable state */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -792,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
unsigned long flags;
unsigned int reg;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
reg = SMC_GET_PMT_CTRL(lp);
@@ -851,18 +852,18 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
int phyaddr = lp->mii.phy_id;
unsigned int bmcr, cr;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
/* duplex state has changed */
SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
SMC_GET_MAC_CR(lp, cr);
if (lp->mii.full_duplex) {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
bmcr |= BMCR_FULLDPLX;
cr |= MAC_CR_RCVOWN_;
} else {
- DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
bmcr &= ~BMCR_FULLDPLX;
cr &= ~MAC_CR_RCVOWN_;
}
@@ -891,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
int status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
/*
* We should not be called if phy_type is zero.
@@ -900,7 +901,7 @@ static void smc911x_phy_configure(struct work_struct *work)
return;
if (smc911x_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
return;
}
spin_lock_irqsave(&lp->lock, flags);
@@ -922,7 +923,7 @@ static void smc911x_phy_configure(struct work_struct *work)
/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc911x_phy_fixed(dev);
goto smc911x_phy_configure_exit;
}
@@ -960,8 +961,8 @@ static void smc911x_phy_configure(struct work_struct *work)
udelay(10);
SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
- DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
- DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
+ DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -984,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int status;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -992,10 +993,10 @@ static void smc911x_phy_interrupt(struct net_device *dev)
smc911x_phy_check_media(dev, 0);
/* read to clear status bits */
SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
- DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
- dev->name, status & 0xffff);
- DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
- dev->name, SMC_GET_AFC_CFG(lp));
+ DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
+ status & 0xffff);
+ DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
+ SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
@@ -1012,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
unsigned int rx_overrun=0, cr, pkts;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
@@ -1033,8 +1034,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
- dev->name, status, mask, status & ~mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ status, mask, status & ~mask);
status &= mask;
if (!status)
@@ -1066,7 +1067,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
SMC_GET_MAC_CR(lp, cr);
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1078,7 +1079,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
cr &= ~MAC_CR_RXEN_;
SMC_SET_MAC_CR(lp, cr);
rx_overrun=1;
- DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
}
@@ -1087,23 +1088,23 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle receive condition */
if ((status & INT_STS_RSFL_) || rx_overrun) {
unsigned int fifo;
- DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ DBG(SMC_DEBUG_RX, dev, "RX irq\n");
fifo = SMC_GET_RX_FIFO_INF(lp);
pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
- DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
- dev->name, pkts, fifo & 0xFFFF );
+ DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
+ pkts, fifo & 0xFFFF);
if (pkts != 0) {
#ifdef SMC_USE_DMA
unsigned int fifo;
if (lp->rxdma_active){
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA active\n", dev->name);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA active\n");
/* The DMA is already running so up the IRQ threshold */
fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
fifo |= pkts & 0xFF;
- DBG(SMC_DEBUG_RX,
- "%s: Setting RX stat FIFO threshold to %d\n",
- dev->name, fifo & 0xff);
+ DBG(SMC_DEBUG_RX, dev,
+ "Setting RX stat FIFO threshold to %d\n",
+ fifo & 0xff);
SMC_SET_FIFO_INT(lp, fifo);
} else
#endif
@@ -1113,7 +1114,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
/* Handle transmit FIFO available */
if (status & INT_STS_TDFA_) {
- DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
SMC_SET_FIFO_TDA(lp, 0xFF);
lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
@@ -1125,9 +1126,9 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle transmit done condition */
#if 1
if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
- "%s: Tx stat FIFO limit (%d) /GPT irq\n",
- dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
+ "Tx stat FIFO limit (%d) /GPT irq\n",
+ (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
smc911x_tx(dev);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_TSFL_);
@@ -1135,23 +1136,20 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
+ DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
if (status & INT_STS_GPT_INT_) {
- DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
- dev->name,
- SMC_GET_IRQ_CFG(lp),
- SMC_GET_FIFO_INT(lp),
- SMC_GET_RX_CFG(lp));
- DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
- "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
- dev->name,
- (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
- SMC_GET_RX_FIFO_INF(lp) & 0xffff,
- SMC_GET_RX_STS_FIFO_PEEK(lp));
+ DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ SMC_GET_IRQ_CFG(lp),
+ SMC_GET_FIFO_INT(lp),
+ SMC_GET_RX_CFG(lp));
+ DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK(lp));
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
SMC_ACK_INT(lp, INT_STS_GPT_INT_);
}
@@ -1159,7 +1157,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* Handle PHY interrupt condition */
if (status & INT_STS_PHY_INT_) {
- DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
smc911x_phy_interrupt(dev);
SMC_ACK_INT(lp, INT_STS_PHY_INT_);
}
@@ -1168,8 +1166,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
/* restore mask state */
SMC_SET_INT_EN(lp, mask);
- DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
- dev->name, 8-timeout);
+ DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
+ 8-timeout);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1185,9 +1183,9 @@ smc911x_tx_dma_irq(int dma, void *data)
struct sk_buff *skb = lp->current_tx_skb;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
BUG_ON(skb == NULL);
@@ -1198,8 +1196,8 @@ smc911x_tx_dma_irq(int dma, void *data)
if (lp->pending_tx_skb != NULL)
smc911x_hardware_send_pkt(dev);
else {
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "No pending Tx packets. DMA disabled\n");
spin_lock_irqsave(&lp->lock, flags);
lp->txdma_active = 0;
if (!lp->tx_throttle) {
@@ -1208,8 +1206,8 @@ smc911x_tx_dma_irq(int dma, void *data)
spin_unlock_irqrestore(&lp->lock, flags);
}
- DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
- "%s: TX DMA irq completed\n", dev->name);
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
+ "TX DMA irq completed\n");
}
static void
smc911x_rx_dma_irq(int dma, void *data)
@@ -1221,8 +1219,8 @@ smc911x_rx_dma_irq(int dma, void *data)
unsigned long flags;
unsigned int pkts;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
/* Clear the DMA interrupt sources */
SMC_DMA_ACK_IRQ(dev, dma);
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
@@ -1242,9 +1240,9 @@ smc911x_rx_dma_irq(int dma, void *data)
lp->rxdma_active = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
- "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
- dev->name, pkts);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
+ "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ pkts);
}
#endif /* SMC_USE_DMA */
@@ -1268,14 +1266,14 @@ static void smc911x_timeout(struct net_device *dev)
int status, mask;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
- dev->name, status, mask);
+ DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
+ status, mask);
/* Dump the current TX FIFO contents and restart */
mask = SMC_GET_TX_CFG(lp);
@@ -1306,7 +1304,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
unsigned int mcr, update_multicast = 0;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, mcr);
@@ -1314,7 +1312,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
mcr |= MAC_CR_PRMS_;
}
/*
@@ -1323,7 +1321,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
mcr |= MAC_CR_MCPAS_;
}
@@ -1363,8 +1361,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
- dev->name);
+ DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
/*
@@ -1378,9 +1375,9 @@ static void smc911x_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_CR(lp, mcr);
if (update_multicast) {
- DBG(SMC_DEBUG_MISC,
- "%s: update mcast hash table 0x%08x 0x%08x\n",
- dev->name, multicast_table[0], multicast_table[1]);
+ DBG(SMC_DEBUG_MISC, dev,
+ "update mcast hash table 0x%08x 0x%08x\n",
+ multicast_table[0], multicast_table[1]);
SMC_SET_HASHL(lp, multicast_table[0]);
SMC_SET_HASHH(lp, multicast_table[1]);
}
@@ -1398,7 +1395,7 @@ smc911x_open(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* reset the hardware */
smc911x_reset(dev);
@@ -1425,7 +1422,7 @@ static int smc911x_close(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1459,7 +1456,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
int ret, status;
unsigned long flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;
@@ -1597,16 +1594,16 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
e2p_cmd = SMC_GET_E2P_CMD(lp);
for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
- PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
+ __func__);
return -EFAULT;
}
mdelay(1);
e2p_cmd = SMC_GET_E2P_CMD(lp);
}
if (timeout == 0) {
- PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
- dev->name, __func__);
+ PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
+ __func__);
return -ETIMEDOUT;
}
return 0;
@@ -1719,7 +1716,7 @@ static int smc911x_findirq(struct net_device *dev)
int timeout = 20;
unsigned long cookie;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
cookie = probe_irq_on();
@@ -1799,13 +1796,14 @@ static int smc911x_probe(struct net_device *dev)
const char *version_string;
unsigned long irq_flags;
- DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
/* First, see if the endian word is recognized */
val = SMC_GET_BYTE_TEST(lp);
- DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
+ CARDNAME, val);
if (val != 0x87654321) {
- printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
retval = -ENODEV;
goto err_out;
}
@@ -1816,26 +1814,29 @@ static int smc911x_probe(struct net_device *dev)
* as future revisions could be added.
*/
chip_id = SMC_GET_PN(lp);
- DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
+ CARDNAME, chip_id);
for(i=0;chip_ids[i].id != 0; i++) {
if (chip_ids[i].id == chip_id) break;
}
if (!chip_ids[i].id) {
- printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
retval = -ENODEV;
goto err_out;
}
version_string = chip_ids[i].name;
revision = SMC_GET_REV(lp);
- DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+ DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
/* At this point I'll assume that the chip is an SMC911x. */
- DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+ DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
+ CARDNAME, chip_ids[i].name);
/* Validate the TX FIFO size requested */
if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
- printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ netdev_err(dev, "Invalid TX FIFO size requested %d\n",
+ tx_fifo_kb);
retval = -EINVAL;
goto err_out;
}
@@ -1887,14 +1888,13 @@ static int smc911x_probe(struct net_device *dev)
case 14:/* 1920 Rx Data Fifo Size */
lp->afc_cfg=0x0006032F;break;
default:
- PRINTK("%s: ERROR -- no AFC_CFG setting found",
- dev->name);
+ PRINTK(dev, "ERROR -- no AFC_CFG setting found");
break;
}
- DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
- "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
- lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
spin_lock_init(&lp->lock);
@@ -1924,8 +1924,7 @@ static int smc911x_probe(struct net_device *dev)
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -1980,33 +1979,32 @@ static int smc911x_probe(struct net_device *dev)
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %#lx IRQ %d",
- dev->name, version_string, lp->revision,
- dev->base_addr, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
+ version_string, lp->revision,
+ dev->base_addr, dev->irq);
#ifdef SMC_USE_DMA
if (lp->rxdma != -1)
- printk(" RXDMA %d ", lp->rxdma);
+ pr_cont(" RXDMA %d", lp->rxdma);
if (lp->txdma != -1)
- printk("TXDMA %d", lp->txdma);
+ pr_cont(" TXDMA %d", lp->txdma);
#endif
- printk("\n");
+ pr_cont("\n");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
- PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ PRINTK(dev, "LAN911x Internal PHY\n");
} else {
- PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
}
}
@@ -2025,7 +2023,7 @@ err_out:
}
/*
- * smc911x_init(void)
+ * smc911x_drv_probe(void)
*
* Output:
* 0 --> there is a device
@@ -2039,6 +2037,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
+ /* ndev is not valid yet, so avoid passing it in. */
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -2093,7 +2092,7 @@ release_both:
release_1:
release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
}
#ifdef SMC_USE_DMA
else {
@@ -2111,7 +2110,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
struct smc911x_local *lp = netdev_priv(ndev);
struct resource *res;
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
unregister_netdev(ndev);
@@ -2140,7 +2139,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
struct net_device *ndev = platform_get_drvdata(dev);
struct smc911x_local *lp = netdev_priv(ndev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
if (netif_running(ndev)) {
netif_device_detach(ndev);
@@ -2158,7 +2157,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
{
struct net_device *ndev = platform_get_drvdata(dev);
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
if (ndev) {
struct smc911x_local *lp = netdev_priv(ndev);
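One detail of the smc911x PRINT_PKT() rework above that is easy to miss: a bare printk(KERN_DEBUG) emits only the log level, opening a new debug-level line, and the following pr_cont() calls append to that same line. A small sketch of the pattern — hypothetical helper, kernel build context assumed:

	#include <linux/printk.h>

	/* Illustration of the continuation-line idiom used by PRINT_PKT():
	 * open one KERN_DEBUG line, then append hex bytes to it with pr_cont().
	 */
	static void example_hexdump_line(const unsigned char *buf, int len)
	{
		int i;

		printk(KERN_DEBUG);		/* start a new debug-level line */
		for (i = 0; i < len; i++)
			pr_cont("%02x ", buf[i]);
		pr_cont("\n");
	}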
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index d51261ba4642..9965da39281b 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -227,7 +227,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
#define SMC_DMA_ACK_IRQ(dev, dma) \
{ \
if (DCSR(dma) & DCSR_BUSERR) { \
- printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ netdev_err(dev, "DMA %d bus error!\n", dma); \
} \
DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index afd9873e9bdb..67d9fdeedd86 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -55,7 +55,7 @@
----------------------------------------------------------------------------*/
static const char version[] =
- "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+ "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)";
#include <linux/module.h>
#include <linux/kernel.h>
@@ -597,7 +597,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
+ netdev_dbg(dev, CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -610,7 +610,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* point to the beginning of the packet */
outw( PTR_AUTOINC , ioaddr + POINTER );
- PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length));
#if SMC_DEBUG > 2
print_packet( buf, length );
#endif
@@ -846,7 +846,6 @@ static const struct net_device_ops smc_netdev_ops = {
static int __init smc_probe(struct net_device *dev, int ioaddr)
{
int i, memory, retval;
- static unsigned version_printed;
unsigned int bank;
const char *version_string;
@@ -913,8 +912,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
It might be prudent to check a listing of MAC addresses
against the hardware address, or do some other tests. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = ioaddr;
@@ -1003,21 +1001,21 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
/* now, print out the card info, in a short format.. */
- printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
- version_string, revision_register & 0xF, ioaddr, dev->irq,
- if_string, memory );
+ netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory);
/*
. Print the Ethernet address
*/
- printk("ADDR: %pM\n", dev->dev_addr);
+ netdev_info(dev, "ADDR: %pM\n", dev->dev_addr);
/* Grab the IRQ */
- retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
- dev->irq, retval);
- goto err_out;
- }
+ retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
+ DRV_NAME, dev->irq, retval);
+ goto err_out;
+ }
dev->netdev_ops = &smc_netdev_ops;
dev->watchdog_timeo = HZ/20;
@@ -1037,30 +1035,32 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d\n", length);
+ pr_debug("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
for ( i = 0; i < lines ; i ++ ) {
int cur;
+ printk(KERN_DEBUG);
for ( cur = 0; cur < 8; cur ++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for ( i = 0; i < remainder/2 ; i++ ) {
byte a, b;
a = *(buf ++ );
b = *(buf ++ );
- printk("%02x%02x ", a, b );
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
#endif
}
#endif
@@ -1127,9 +1127,8 @@ static void smc_timeout(struct net_device *dev)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
- printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
- tx_done(dev) ? "IRQ conflict" :
- "network cable problem");
+ netdev_warn(dev, CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
@@ -1299,8 +1298,7 @@ static void smc_tx( struct net_device * dev )
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
if ( tx_status & TS_LATCOL ) {
- printk(KERN_DEBUG CARDNAME
- ": Late collision occurred on last xmit.\n");
+ netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n");
dev->stats.tx_window_errors++;
}
#if 0
@@ -1308,7 +1306,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt\n");
+ netdev_info(dev, CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1547,9 +1545,7 @@ int __init init_module(void)
/* copy the parameters from insmod into the device structure */
devSMC9194 = smc_init(-1);
- if (IS_ERR(devSMC9194))
- return PTR_ERR(devSMC9194);
- return 0;
+ return PTR_ERR_OR_ZERO(devSMC9194);
}
void __exit cleanup_module(void)
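The init_module() change above relies on PTR_ERR_OR_ZERO() from <linux/err.h>, which collapses the open-coded IS_ERR check it replaces. As a reminder, an equivalent sketch — the function name is hypothetical and for illustration only:

	#include <linux/err.h>

	/* PTR_ERR_OR_ZERO(ptr) is shorthand for "return the error code if the
	 * pointer encodes an error, otherwise return 0".
	 */
	static inline int example_ptr_err_or_zero(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}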
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 656d2e2ebfc9..8ef70d9c20c1 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -740,7 +740,7 @@ static int smc91c92_resume(struct pcmcia_device *link)
(smc->cardid == PRODID_PSION_NET100))) {
i = osi_load_firmware(link);
if (i) {
- pr_err("smc91c92_cs: Failed to load firmware\n");
+ netdev_err(dev, "Failed to load firmware\n");
return i;
}
}
@@ -793,7 +793,7 @@ static int check_sig(struct pcmcia_device *link)
}
if (width) {
- pr_info("using 8-bit IO window\n");
+ netdev_info(dev, "using 8-bit IO window\n");
smc91c92_suspend(link);
pcmcia_fixup_iowidth(link);
@@ -1036,7 +1036,7 @@ static void smc_dump(struct net_device *dev)
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
SMC_SELECT_BANK(w);
- netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
+ netdev_dbg(dev, "bank %d: ", w);
for (i = 0; i < 14; i += 2)
pr_cont(" %04x", inw(ioaddr + i));
pr_cont("\n");
@@ -1213,8 +1213,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
if (smc->saved_skb) {
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
- netdev_printk(KERN_DEBUG, dev,
- "Internal error -- sent packet while busy\n");
+ netdev_dbg(dev, "Internal error -- sent packet while busy\n");
return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
@@ -1254,7 +1253,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
}
/* Otherwise defer until the Tx-space-allocated interrupt. */
- pr_debug("%s: memory allocation deferred.\n", dev->name);
+ netdev_dbg(dev, "memory allocation deferred.\n");
outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
spin_unlock_irqrestore(&smc->lock, flags);
@@ -1317,8 +1316,8 @@ static void smc_eph_irq(struct net_device *dev)
SMC_SELECT_BANK(0);
ephs = inw(ioaddr + EPH);
- pr_debug("%s: Ethernet protocol handler interrupt, status"
- " %4.4x.\n", dev->name, ephs);
+ netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n",
+ ephs);
/* Could be a counter roll-over warning: update stats. */
card_stats = inw(ioaddr + COUNTER);
/* single collisions */
@@ -1357,8 +1356,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
ioaddr = dev->base_addr;
- pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
- irq, ioaddr);
+ netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n",
+ irq, ioaddr);
spin_lock(&smc->lock);
smc->watchdog = 0;
@@ -1366,8 +1365,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if ((saved_bank & 0xff00) != 0x3300) {
/* The device does not exist -- the card could be off-line, or
maybe it has been ejected. */
- pr_debug("%s: SMC91c92 interrupt %d for non-existent"
- "/ejected device.\n", dev->name, irq);
+ netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n",
+ irq);
handled = 0;
goto irq_done;
}
@@ -1380,8 +1379,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do { /* read the status flag, and mask it */
status = inw(ioaddr + INTERRUPT) & 0xff;
- pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
- status, mask);
+ netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n",
+ status, mask);
if ((status & mask) == 0) {
if (bogus_cnt == INTR_WORK)
handled = 0;
@@ -1425,15 +1424,15 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_eph_irq(dev);
} while (--bogus_cnt);
- pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
- " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+ netdev_dbg(dev, " Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n",
+ mask, saved_bank, saved_pointer);
/* restore state register */
outw((mask<<8), ioaddr + INTERRUPT);
outw(saved_pointer, ioaddr + POINTER);
SMC_SELECT_BANK(saved_bank);
- pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+ netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq);
irq_done:
@@ -1491,10 +1490,10 @@ static void smc_rx(struct net_device *dev)
rx_status = inw(ioaddr + DATA_1);
packet_length = inw(ioaddr + DATA_1) & 0x07ff;
- pr_debug("%s: Receive status %4.4x length %d.\n",
- dev->name, rx_status, packet_length);
+ netdev_dbg(dev, "Receive status %4.4x length %d.\n",
+ rx_status, packet_length);
- if (!(rx_status & RS_ERRORS)) {
+ if (!(rx_status & RS_ERRORS)) {
/* do stuff to make a new packet */
struct sk_buff *skb;
@@ -1502,7 +1501,7 @@ static void smc_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, packet_length+2);
if (skb == NULL) {
- pr_debug("%s: Low memory, packet dropped.\n", dev->name);
+ netdev_dbg(dev, "Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
outw(MC_RELEASE, ioaddr + MMU_CMD);
return;
@@ -1643,7 +1642,7 @@ static void smc_reset(struct net_device *dev)
struct smc_private *smc = netdev_priv(dev);
int i;
- pr_debug("%s: smc91c92 reset called.\n", dev->name);
+ netdev_dbg(dev, "smc91c92 reset called.\n");
/* The first interaction must be a write to bring the chip out
of sleep mode. */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73be7f3982e6..0c9b5d94154f 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -58,7 +58,7 @@
* 22/09/04 Nicolas Pitre big update (see commit log for details)
*/
static const char version[] =
- "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n";
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>";
/* Debugging level */
#ifndef SMC_DEBUG
@@ -149,16 +149,16 @@ MODULE_ALIAS("platform:smc91x");
#define MII_DELAY 1
#if SMC_DEBUG > 0
-#define DBG(n, args...) \
+#define DBG(n, dev, args...) \
do { \
if (SMC_DEBUG >= (n)) \
- printk(args); \
+ netdev_dbg(dev, args); \
} while (0)
-#define PRINTK(args...) printk(args)
+#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, args...) do { } while(0)
-#define PRINTK(args...) printk(KERN_DEBUG args)
+#define DBG(n, dev, args...) do { } while (0)
+#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
#if SMC_DEBUG > 3
@@ -173,24 +173,26 @@ static void PRINT_PKT(u_char *buf, int length)
for (i = 0; i < lines ; i ++) {
int cur;
+ printk(KERN_DEBUG);
for (cur = 0; cur < 8; cur++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
+ printk(KERN_DEBUG);
for (i = 0; i < remainder/2 ; i++) {
u_char a, b;
a = *buf++;
b = *buf++;
- printk("%02x%02x ", a, b);
+ pr_cont("%02x%02x ", a, b);
}
- printk("\n");
+ pr_cont("\n");
}
#else
-#define PRINT_PKT(x...) do { } while(0)
+#define PRINT_PKT(x...) do { } while (0)
#endif
@@ -226,8 +228,8 @@ static void PRINT_PKT(u_char *buf, int length)
unsigned long timeout = jiffies + 2; \
while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
if (time_after(jiffies, timeout)) { \
- printk("%s: timeout %s line %d\n", \
- dev->name, __FILE__, __LINE__); \
+ netdev_dbg(dev, "timeout %s line %d\n", \
+ __FILE__, __LINE__); \
break; \
} \
cpu_relax(); \
@@ -246,7 +248,7 @@ static void smc_reset(struct net_device *dev)
unsigned int ctl, cfg;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Disable all interrupts, block TX tasklet */
spin_lock_irq(&lp->lock);
@@ -339,7 +341,7 @@ static void smc_enable(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int mask;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* see the header file for options in TCR/RCR DEFAULT */
SMC_SELECT_BANK(lp, 0);
@@ -373,7 +375,7 @@ static void smc_shutdown(struct net_device *dev)
void __iomem *ioaddr = lp->base;
struct sk_buff *pending_skb;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* no more interrupts for me */
spin_lock_irq(&lp->lock);
@@ -406,11 +408,11 @@ static inline void smc_rcv(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int packet_number, status, packet_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
packet_number = SMC_GET_RXFIFO(lp);
if (unlikely(packet_number & RXFIFO_REMPTY)) {
- PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_rcv with nothing on FIFO.\n");
return;
}
@@ -420,9 +422,8 @@ static inline void smc_rcv(struct net_device *dev)
/* First two words are status and packet length */
SMC_GET_PKT_HDR(lp, status, packet_len);
packet_len &= 0x07ff; /* mask off top bits */
- DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
- dev->name, packet_number, status,
- packet_len, packet_len);
+ DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+ packet_number, status, packet_len, packet_len);
back:
if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
@@ -433,8 +434,8 @@ static inline void smc_rcv(struct net_device *dev)
}
if (packet_len < 6) {
/* bloody hardware */
- printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
- dev->name, packet_len, status);
+ netdev_err(dev, "fubar (rxlen %u status %x\n",
+ packet_len, status);
status |= RS_TOOSHORT;
}
SMC_WAIT_MMU_BUSY(lp);
@@ -551,7 +552,7 @@ static void smc_hardware_send_pkt(unsigned long data)
unsigned char *buf;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
if (!smc_special_trylock(&lp->lock, flags)) {
netif_stop_queue(dev);
@@ -568,7 +569,7 @@ static void smc_hardware_send_pkt(unsigned long data)
packet_no = SMC_GET_AR(lp);
if (unlikely(packet_no & AR_FAILED)) {
- printk("%s: Memory allocation failed.\n", dev->name);
+ netdev_err(dev, "Memory allocation failed.\n");
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
smc_special_unlock(&lp->lock, flags);
@@ -581,8 +582,8 @@ static void smc_hardware_send_pkt(unsigned long data)
buf = skb->data;
len = skb->len;
- DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
- dev->name, packet_no, len, len, buf);
+ DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+ packet_no, len, len, buf);
PRINT_PKT(buf, len);
/*
@@ -637,7 +638,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int numPages, poll_count, status;
unsigned long flags;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
BUG_ON(lp->pending_tx_skb != NULL);
@@ -654,7 +655,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
if (unlikely(numPages > 7)) {
- printk("%s: Far too big packet error.\n", dev->name);
+ netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -685,7 +686,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!poll_count) {
/* oh well, wait until the chip finds memory later */
netif_stop_queue(dev);
- DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+ DBG(2, dev, "TX memory allocation deferred.\n");
SMC_ENABLE_INT(lp, IM_ALLOC_INT);
} else {
/*
@@ -709,12 +710,12 @@ static void smc_tx(struct net_device *dev)
void __iomem *ioaddr = lp->base;
unsigned int saved_packet, packet_no, tx_status, pkt_len;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* If the TX FIFO is empty then nothing to do */
packet_no = SMC_GET_TXFIFO(lp);
if (unlikely(packet_no & TXFIFO_TEMPTY)) {
- PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+ PRINTK(dev, "smc_tx with nothing on FIFO.\n");
return;
}
@@ -725,8 +726,8 @@ static void smc_tx(struct net_device *dev)
/* read the first word (status word) from this packet */
SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
- DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
- dev->name, tx_status, packet_no);
+ DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n",
+ tx_status, packet_no);
if (!(tx_status & ES_TX_SUC))
dev->stats.tx_errors++;
@@ -735,14 +736,12 @@ static void smc_tx(struct net_device *dev)
dev->stats.tx_carrier_errors++;
if (tx_status & (ES_LATCOL | ES_16COL)) {
- PRINTK("%s: %s occurred on last xmit\n", dev->name,
+ PRINTK(dev, "%s occurred on last xmit\n",
(tx_status & ES_LATCOL) ?
"late collision" : "too many collisions");
dev->stats.tx_window_errors++;
if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
- printk(KERN_INFO "%s: unexpectedly large number of "
- "bad collisions. Please check duplex "
- "setting.\n", dev->name);
+ netdev_info(dev, "unexpectedly large number of bad collisions. Please check duplex setting.\n");
}
}
@@ -830,8 +829,8 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
return phydata;
@@ -857,8 +856,8 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
/* Return to idle state */
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
- DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
- __func__, phyaddr, phyreg, phydata);
+ DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
SMC_SELECT_BANK(lp, 2);
}
@@ -871,7 +870,7 @@ static void smc_phy_detect(struct net_device *dev)
struct smc_local *lp = netdev_priv(dev);
int phyaddr;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
lp->phy_type = 0;
@@ -886,8 +885,8 @@ static void smc_phy_detect(struct net_device *dev)
id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
- DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
- dev->name, id1, id2);
+ DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n",
+ id1, id2);
/* Make sure it is a valid identifier */
if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
@@ -910,7 +909,7 @@ static int smc_phy_fixed(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int bmcr, cfg1;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
/* Enter Link Disable state */
cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1044,7 +1043,7 @@ static void smc_phy_configure(struct work_struct *work)
int my_ad_caps; /* My Advertised capabilities */
int status;
- DBG(3, "%s:smc_program_phy()\n", dev->name);
+ DBG(3, dev, "smc_program_phy()\n");
spin_lock_irq(&lp->lock);
@@ -1055,7 +1054,7 @@ static void smc_phy_configure(struct work_struct *work)
goto smc_phy_configure_exit;
if (smc_phy_reset(dev, phyaddr)) {
- printk("%s: PHY reset timed out\n", dev->name);
+ netdev_info(dev, "PHY reset timed out\n");
goto smc_phy_configure_exit;
}
@@ -1082,7 +1081,7 @@ static void smc_phy_configure(struct work_struct *work)
my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
- printk(KERN_INFO "Auto negotiation NOT supported\n");
+ netdev_info(dev, "Auto negotiation NOT supported\n");
smc_phy_fixed(dev);
goto smc_phy_configure_exit;
}
@@ -1118,8 +1117,8 @@ static void smc_phy_configure(struct work_struct *work)
*/
status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
- DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
- DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+ DBG(2, dev, "phy caps=%x\n", my_phy_caps);
+ DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
/* Restart auto-negotiation process in order to advertise my caps */
smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
@@ -1143,7 +1142,7 @@ static void smc_phy_interrupt(struct net_device *dev)
int phyaddr = lp->mii.phy_id;
int phy18;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (lp->phy_type == 0)
return;
@@ -1179,8 +1178,8 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
netif_carrier_on(dev);
}
if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link %s\n", dev->name,
- new_carrier ? "up" : "down");
+ netdev_info(dev, "link %s\n",
+ new_carrier ? "up" : "down");
}
}
@@ -1211,7 +1210,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
int status, mask, timeout, card_stats;
int saved_pointer;
- DBG(3, "%s: %s\n", dev->name, __func__);
+ DBG(3, dev, "%s\n", __func__);
spin_lock(&lp->lock);
@@ -1230,12 +1229,12 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
do {
status = SMC_GET_INT(lp);
- DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
- dev->name, status, mask,
- ({ int meminfo; SMC_SELECT_BANK(lp, 0);
- meminfo = SMC_GET_MIR(lp);
- SMC_SELECT_BANK(lp, 2); meminfo; }),
- SMC_GET_FIFO(lp));
+ DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ status, mask,
+ ({ int meminfo; SMC_SELECT_BANK(lp, 0);
+ meminfo = SMC_GET_MIR(lp);
+ SMC_SELECT_BANK(lp, 2); meminfo; }),
+ SMC_GET_FIFO(lp));
status &= mask;
if (!status)
@@ -1243,20 +1242,20 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
if (status & IM_TX_INT) {
/* do this before RX as it will free memory quickly */
- DBG(3, "%s: TX int\n", dev->name);
+ DBG(3, dev, "TX int\n");
smc_tx(dev);
SMC_ACK_INT(lp, IM_TX_INT);
if (THROTTLE_TX_PKTS)
netif_wake_queue(dev);
} else if (status & IM_RCV_INT) {
- DBG(3, "%s: RX irq\n", dev->name);
+ DBG(3, dev, "RX irq\n");
smc_rcv(dev);
} else if (status & IM_ALLOC_INT) {
- DBG(3, "%s: Allocation irq\n", dev->name);
+ DBG(3, dev, "Allocation irq\n");
tasklet_hi_schedule(&lp->tx_task);
mask &= ~IM_ALLOC_INT;
} else if (status & IM_TX_EMPTY_INT) {
- DBG(3, "%s: TX empty\n", dev->name);
+ DBG(3, dev, "TX empty\n");
mask &= ~IM_TX_EMPTY_INT;
/* update stats */
@@ -1271,10 +1270,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
/* multiple collisions */
dev->stats.collisions += card_stats & 0xF;
} else if (status & IM_RX_OVRN_INT) {
- DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
- ({ int eph_st; SMC_SELECT_BANK(lp, 0);
- eph_st = SMC_GET_EPH_STATUS(lp);
- SMC_SELECT_BANK(lp, 2); eph_st; }));
+ DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n",
+ ({ int eph_st; SMC_SELECT_BANK(lp, 0);
+ eph_st = SMC_GET_EPH_STATUS(lp);
+ SMC_SELECT_BANK(lp, 2); eph_st; }));
SMC_ACK_INT(lp, IM_RX_OVRN_INT);
dev->stats.rx_errors++;
dev->stats.rx_fifo_errors++;
@@ -1285,7 +1284,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+ PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n");
}
} while (--timeout);
@@ -1296,11 +1295,11 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
#ifndef CONFIG_NET_POLL_CONTROLLER
if (timeout == MAX_IRQ_LOOPS)
- PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
- dev->name, mask);
+ PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n",
+ mask);
#endif
- DBG(3, "%s: Interrupt done (%d loops)\n",
- dev->name, MAX_IRQ_LOOPS - timeout);
+ DBG(3, dev, "Interrupt done (%d loops)\n",
+ MAX_IRQ_LOOPS - timeout);
/*
* We return IRQ_HANDLED unconditionally here even if there was
@@ -1333,7 +1332,7 @@ static void smc_timeout(struct net_device *dev)
void __iomem *ioaddr = lp->base;
int status, mask, eph_st, meminfo, fifo;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
spin_lock_irq(&lp->lock);
status = SMC_GET_INT(lp);
@@ -1344,9 +1343,8 @@ static void smc_timeout(struct net_device *dev)
meminfo = SMC_GET_MIR(lp);
SMC_SELECT_BANK(lp, 2);
spin_unlock_irq(&lp->lock);
- PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
- "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
- dev->name, status, mask, meminfo, fifo, eph_st );
+ PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+ status, mask, meminfo, fifo, eph_st);
smc_reset(dev);
smc_enable(dev);
@@ -1377,10 +1375,10 @@ static void smc_set_multicast_list(struct net_device *dev)
unsigned char multicast_table[8];
int update_multicast = 0;
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
if (dev->flags & IFF_PROMISC) {
- DBG(2, "%s: RCR_PRMS\n", dev->name);
+ DBG(2, dev, "RCR_PRMS\n");
lp->rcr_cur_mode |= RCR_PRMS;
}
@@ -1395,7 +1393,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
- DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ DBG(2, dev, "RCR_ALMUL\n");
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1437,7 +1435,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* now, the table can be loaded into the chipset */
update_multicast = 1;
} else {
- DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+ DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n");
lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
/*
@@ -1470,7 +1468,7 @@ smc_open(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
/* Setup the default Register Modes */
lp->tcr_cur_mode = TCR_DEFAULT;
@@ -1514,7 +1512,7 @@ static int smc_close(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- DBG(2, "%s: %s\n", dev->name, __func__);
+ DBG(2, dev, "%s\n", __func__);
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1694,7 +1692,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Reading %d bytes at %d(0x%x)\n",
+ DBG(1, dev, "Reading %d bytes at %d(0x%x)\n",
eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
@@ -1706,7 +1704,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev,
ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
if (ret != 0)
return ret;
- DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
data[i] = (wbuf >> 8) & 0xff;
data[i+1] = wbuf & 0xff;
}
@@ -1719,8 +1717,8 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
int i;
int imax;
- DBG(1, "Writing %d bytes to %d(0x%x)\n",
- eeprom->len, eeprom->offset, eeprom->offset);
+ DBG(1, dev, "Writing %d bytes to %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
imax = smc_ethtool_geteeprom_len(dev);
for (i = 0; i < eeprom->len; i += 2) {
int ret;
@@ -1729,7 +1727,7 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
if (offset > imax)
break;
wbuf = (data[i] << 8) | data[i + 1];
- DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+ DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
if (ret != 0)
return ret;
@@ -1784,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
int timeout = 20;
unsigned long cookie;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
cookie = probe_irq_on();
@@ -1856,21 +1854,21 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
unsigned long irq_flags)
{
struct smc_local *lp = netdev_priv(dev);
- static int version_printed = 0;
int retval;
unsigned int val, revision_register;
const char *version_string;
- DBG(2, "%s: %s\n", CARDNAME, __func__);
+ DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
/* First, see if the high byte is 0x33 */
val = SMC_CURRENT_BANK(lp);
- DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+ DBG(2, dev, "%s: bank signature probe returned 0x%04x\n",
+ CARDNAME, val);
if ((val & 0xFF00) != 0x3300) {
if ((val & 0xFF) == 0x33) {
- printk(KERN_WARNING
- "%s: Detected possible byte-swapped interface"
- " at IOADDR %p\n", CARDNAME, ioaddr);
+ netdev_warn(dev,
+ "%s: Detected possible byte-swapped interface at IOADDR %p\n",
+ CARDNAME, ioaddr);
}
retval = -ENODEV;
goto err_out;
@@ -1897,8 +1895,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
val = SMC_GET_BASE(lp);
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
- printk("%s: IOADDR %p doesn't match configuration (%x).\n",
- CARDNAME, ioaddr, val);
+ netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
+ CARDNAME, ioaddr, val);
}
/*
@@ -1908,21 +1906,19 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
*/
SMC_SELECT_BANK(lp, 3);
revision_register = SMC_GET_REV(lp);
- DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+ DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
version_string = chip_ids[ (revision_register >> 4) & 0xF];
if (!version_string || (revision_register & 0xff00) != 0x3300) {
/* I don't recognize this chip, so... */
- printk("%s: IO %p: Unrecognized revision register 0x%04x"
- ", Contact author.\n", CARDNAME,
- ioaddr, revision_register);
+ netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n",
+ CARDNAME, ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
}
/* At this point I'll assume that the chip is an SMC91x. */
- if (version_printed++ == 0)
- printk("%s", version);
+ pr_info_once("%s\n", version);
/* fill in some of the fields */
dev->base_addr = (unsigned long)ioaddr;
@@ -1940,7 +1936,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/*
* If dev->irq is 0, then the device has to be banged on to see
* what the IRQ is.
- *
+ *
* This banging doesn't always detect the IRQ, for unknown reasons.
* a workaround is to reset the chip and try again.
*
@@ -1965,8 +1961,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
}
}
if (dev->irq == 0) {
- printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
- dev->name);
+ netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
retval = -ENODEV;
goto err_out;
}
@@ -2030,32 +2025,31 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %p IRQ %d",
- dev->name, version_string, revision_register & 0x0f,
- lp->base, dev->irq);
+ netdev_info(dev, "%s (rev %d) at %p IRQ %d",
+ version_string, revision_register & 0x0f,
+ lp->base, dev->irq);
if (dev->dma != (unsigned char)-1)
- printk(" DMA %d", dev->dma);
+ pr_cont(" DMA %d", dev->dma);
- printk("%s%s\n",
+ pr_cont("%s%s\n",
lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", dev->name);
+ netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Ethernet addr: %pM\n",
+ dev->dev_addr);
}
if (lp->phy_type == 0) {
- PRINTK("%s: No PHY found\n", dev->name);
+ PRINTK(dev, "No PHY found\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
- PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+ PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n");
} else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
- PRINTK("%s: PHY LAN83C180\n", dev->name);
+ PRINTK(dev, "PHY LAN83C180\n");
}
}
@@ -2165,7 +2159,8 @@ static inline void smc_request_datacs(struct platform_device *pdev, struct net_d
return;
if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
- printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ netdev_info(ndev, "%s: failed to request datacs memory region.\n",
+ CARDNAME);
return;
}
@@ -2307,7 +2302,7 @@ static int smc_drv_probe(struct platform_device *pdev)
out_free_netdev:
free_netdev(ndev);
out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
+ pr_info("%s: not found (%d).\n", CARDNAME, ret);
return ret;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 98eedb90cdc3..c9d4c872e81d 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -907,8 +907,8 @@ static const char * chip_ids[ 16 ] = {
({ \
int __b = SMC_CURRENT_BANK(lp); \
if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
- printk( "%s: bank reg screwed (0x%04x)\n", \
- CARDNAME, __b ); \
+ pr_err("%s: bank reg screwed (0x%04x)\n", \
+ CARDNAME, __b); \
BUG(); \
} \
reg<<SMC_IO_SHIFT; \
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5fdbc2686eb3..8564f23a6796 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2167,7 +2167,7 @@ static int smsc911x_init(struct net_device *dev)
udelay(1000);
if (to == 0) {
- pr_err("Device not READY in 100ms aborting\n");
+ netdev_err(dev, "Device not READY in 100ms aborting\n");
return -ENODEV;
}
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, 6);
+ memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 5f9e79f7f2df..f433d97aa097 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -19,6 +19,8 @@
***************************************************************************
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -33,7 +35,6 @@
#include "smsc9420.h"
#define DRV_NAME "smsc9420"
-#define PFX DRV_NAME ": "
#define DRV_MDIONAME "smsc9420-mdio"
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
@@ -97,21 +98,6 @@ static uint debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "debug level");
-#define smsc_dbg(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_info(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f "\n", ## a); \
-} while (0)
-
-#define smsc_warn(TYPE, f, a...) \
-do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f "\n", ## a); \
-} while (0)
-
static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
{
return ioread32(pd->ioaddr + offset);
@@ -140,7 +126,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
/* confirm MII not busy */
if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
- smsc_warn(DRV, "MII is busy???");
+ netif_warn(pd, drv, pd->dev, "MII is busy???\n");
goto out;
}
@@ -159,7 +145,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
udelay(10);
}
- smsc_warn(DRV, "MII busy timeout!");
+ netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
out:
spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -178,7 +164,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
/* confirm MII not busy */
if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
- smsc_warn(DRV, "MII is busy???");
+ netif_warn(pd, drv, pd->dev, "MII is busy???\n");
goto out;
}
@@ -200,7 +186,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
udelay(10);
}
- smsc_warn(DRV, "MII busy timeout!");
+ netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
out:
spin_unlock_irqrestore(&pd->phy_lock, flags);
@@ -222,7 +208,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
BUG_ON(!pd);
if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
- smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy");
+ netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__);
return -EIO;
}
@@ -235,7 +221,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
return 0;
} while (timeout--);
- smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out");
+ netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__);
return -EIO;
}
@@ -347,9 +333,9 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
int timeout = 100;
u32 e2cmd;
- smsc_dbg(HW, "op 0x%08x", op);
+ netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op);
if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
- smsc_warn(HW, "Busy at start");
+ netif_warn(pd, hw, pd->dev, "Busy at start\n");
return -EBUSY;
}
@@ -362,12 +348,13 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
if (!timeout) {
- smsc_info(HW, "TIMED OUT");
+ netif_info(pd, hw, pd->dev, "TIMED OUT\n");
return -EAGAIN;
}
if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
- smsc_info(HW, "Error occurred during eeprom operation");
+ netif_info(pd, hw, pd->dev,
+ "Error occurred during eeprom operation\n");
return -EINVAL;
}
@@ -380,7 +367,7 @@ static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd,
u32 op = E2P_CMD_EPC_CMD_READ_ | address;
int ret;
- smsc_dbg(HW, "address 0x%x", address);
+ netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address);
ret = smsc9420_eeprom_send_cmd(pd, op);
if (!ret)
@@ -395,7 +382,7 @@ static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd,
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
int ret;
- smsc_dbg(HW, "address 0x%x, data 0x%x", address, data);
+ netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data);
ret = smsc9420_eeprom_send_cmd(pd, op);
if (!ret) {
@@ -492,7 +479,8 @@ static void smsc9420_check_mac_address(struct net_device *dev)
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE, "MAC Address is specified by configuration");
+ netif_dbg(pd, probe, pd->dev,
+ "MAC Address is specified by configuration\n");
} else {
/* Try reading mac address from device. if EEPROM is present
* it will already have been set */
@@ -507,12 +495,14 @@ static void smsc9420_check_mac_address(struct net_device *dev)
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
- smsc_dbg(PROBE, "Mac Address is read from EEPROM");
+ netif_dbg(pd, probe, pd->dev,
+ "Mac Address is read from EEPROM\n");
} else {
/* eeprom values are invalid, generate random MAC */
eth_hw_addr_random(dev);
smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE, "MAC Address is set to random");
+ netif_dbg(pd, probe, pd->dev,
+ "MAC Address is set to random\n");
}
}
}
@@ -535,7 +525,7 @@ static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
}
if (!timeout)
- smsc_warn(IFDOWN, "TX DMAC failed to stop");
+ netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n");
/* ACK Tx DMAC stop bit */
smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
@@ -646,7 +636,8 @@ static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
}
if (!timeout)
- smsc_warn(IFDOWN, "RX DMAC did not stop! timeout.");
+ netif_warn(pd, ifdown, pd->dev,
+ "RX DMAC did not stop! timeout\n");
/* ACK the Rx DMAC stop bit */
smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
@@ -736,7 +727,7 @@ static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd)
smsc9420_reg_read(pd, BUS_MODE);
udelay(2);
if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
- smsc_warn(DRV, "Software reset not cleared");
+ netif_warn(pd, drv, pd->dev, "Software reset not cleared\n");
}
static int smsc9420_stop(struct net_device *dev)
@@ -855,7 +846,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pd->pdev, mapping)) {
dev_kfree_skb_any(skb);
- smsc_warn(RX_ERR, "pci_map_single failed!");
+ netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
return -ENOMEM;
}
@@ -1004,7 +995,8 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
mapping = pci_map_single(pd->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pd->pdev, mapping)) {
- smsc_warn(TX_ERR, "pci_map_single failed, dropping packet");
+ netif_warn(pd, tx_err, pd->dev,
+ "pci_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
}
@@ -1056,12 +1048,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
if (dev->flags & IFF_PROMISC) {
- smsc_dbg(HW, "Promiscuous Mode Enabled");
+ netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n");
mac_cr |= MAC_CR_PRMS_;
mac_cr &= (~MAC_CR_MCPAS_);
mac_cr &= (~MAC_CR_HPFILT_);
} else if (dev->flags & IFF_ALLMULTI) {
- smsc_dbg(HW, "Receive all Multicast Enabled");
+ netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n");
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
@@ -1069,7 +1061,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
- smsc_dbg(HW, "Multicast filter enabled");
+ netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n");
netdev_for_each_mc_addr(ha, dev) {
u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
@@ -1087,7 +1079,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_MCPAS_);
mac_cr |= MAC_CR_HPFILT_;
} else {
- smsc_dbg(HW, "Receive own packets only.");
+ netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
smsc9420_reg_write(pd, HASHH, 0);
smsc9420_reg_write(pd, HASHL, 0);
@@ -1115,11 +1107,11 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
else
flow = 0;
- smsc_info(LINK, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n",
+ cap & FLOW_CTRL_RX ? "enabled" : "disabled",
+ cap & FLOW_CTRL_TX ? "enabled" : "disabled");
} else {
- smsc_info(LINK, "half duplex");
+ netif_info(pd, link, pd->dev, "half duplex\n");
flow = 0;
}
@@ -1137,10 +1129,10 @@ static void smsc9420_phy_adjust_link(struct net_device *dev)
if (phy_dev->duplex != pd->last_duplex) {
u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
if (phy_dev->duplex) {
- smsc_dbg(LINK, "full duplex mode");
+ netif_dbg(pd, link, pd->dev, "full duplex mode\n");
mac_cr |= MAC_CR_FDPX_;
} else {
- smsc_dbg(LINK, "half duplex mode");
+ netif_dbg(pd, link, pd->dev, "half duplex mode\n");
mac_cr &= ~MAC_CR_FDPX_;
}
smsc9420_reg_write(pd, MAC_CR, mac_cr);
@@ -1152,9 +1144,9 @@ static void smsc9420_phy_adjust_link(struct net_device *dev)
carrier = netif_carrier_ok(dev);
if (carrier != pd->last_carrier) {
if (carrier)
- smsc_dbg(LINK, "carrier OK");
+ netif_dbg(pd, link, pd->dev, "carrier OK\n");
else
- smsc_dbg(LINK, "no carrier");
+ netif_dbg(pd, link, pd->dev, "no carrier\n");
pd->last_carrier = carrier;
}
}
@@ -1168,24 +1160,24 @@ static int smsc9420_mii_probe(struct net_device *dev)
/* Device only supports internal PHY at address 1 */
if (!pd->mii_bus->phy_map[1]) {
- pr_err("%s: no PHY found at address 1\n", dev->name);
+ netdev_err(dev, "no PHY found at address 1\n");
return -ENODEV;
}
phydev = pd->mii_bus->phy_map[1];
- smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
- phydev->phy_id);
+ netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n",
+ phydev->addr, phydev->phy_id);
phydev = phy_connect(dev, dev_name(&phydev->dev),
smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- pr_err("%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
- pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
/* mask with MAC supported features */
phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -1223,12 +1215,12 @@ static int smsc9420_mii_init(struct net_device *dev)
pd->mii_bus->phy_mask = ~(1 << 1);
if (mdiobus_register(pd->mii_bus)) {
- smsc_warn(PROBE, "Error registering mii bus");
+ netif_warn(pd, probe, pd->dev, "Error registering mii bus\n");
goto err_out_free_bus_2;
}
if (smsc9420_mii_probe(dev) < 0) {
- smsc_warn(PROBE, "Error probing mii bus");
+ netif_warn(pd, probe, pd->dev, "Error probing mii bus\n");
goto err_out_unregister_bus_3;
}
@@ -1281,12 +1273,11 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->rx_ring);
- pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- RX_RING_SIZE), GFP_KERNEL);
- if (pd->rx_buffers == NULL) {
- smsc_warn(IFUP, "Failed to allocated rx_buffers");
+ pd->rx_buffers = kmalloc_array(RX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (pd->rx_buffers == NULL)
goto out;
- }
/* initialize the rx ring */
for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1301,7 +1292,8 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
/* now allocate the entire ring of skbs */
for (i = 0; i < RX_RING_SIZE; i++) {
if (smsc9420_alloc_rx_buffer(pd, i)) {
- smsc_warn(IFUP, "failed to allocate rx skb %d", i);
+ netif_warn(pd, ifup, pd->dev,
+ "failed to allocate rx skb %d\n", i);
goto out_free_rx_skbs;
}
}
@@ -1310,13 +1302,14 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
pd->rx_ring_tail = 0;
smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
- smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1));
+ netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n",
+ smsc9420_reg_read(pd, VLAN1));
if (pd->rx_csum) {
/* Enable RX COE */
u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
smsc9420_reg_write(pd, COE_CR, coe);
- smsc_dbg(IFUP, "COE_CR = 0x%08x", coe);
+ netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe);
}
smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
@@ -1339,7 +1332,8 @@ static int smsc9420_open(struct net_device *dev)
int result = 0, timeout;
if (!is_valid_ether_addr(dev->dev_addr)) {
- smsc_warn(IFUP, "dev_addr is not a valid MAC address");
+ netif_warn(pd, ifup, pd->dev,
+ "dev_addr is not a valid MAC address\n");
result = -EADDRNOTAVAIL;
goto out_0;
}
@@ -1358,7 +1352,7 @@ static int smsc9420_open(struct net_device *dev)
result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
if (result) {
- smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
+ netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq);
result = -ENODEV;
goto out_0;
}
@@ -1393,7 +1387,7 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_pci_flush_write(pd);
/* test the IRQ connection to the ISR */
- smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
+ netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq);
pd->software_irq_signal = false;
spin_lock_irqsave(&pd->int_lock, flags);
@@ -1423,30 +1417,32 @@ static int smsc9420_open(struct net_device *dev)
spin_unlock_irqrestore(&pd->int_lock, flags);
if (!pd->software_irq_signal) {
- smsc_warn(IFUP, "ISR failed signaling test");
+ netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n");
result = -ENODEV;
goto out_free_irq_1;
}
- smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
+ netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq);
result = smsc9420_alloc_tx_ring(pd);
if (result) {
- smsc_warn(IFUP, "Failed to Initialize tx dma ring");
+ netif_warn(pd, ifup, pd->dev,
+ "Failed to Initialize tx dma ring\n");
result = -ENOMEM;
goto out_free_irq_1;
}
result = smsc9420_alloc_rx_ring(pd);
if (result) {
- smsc_warn(IFUP, "Failed to Initialize rx dma ring");
+ netif_warn(pd, ifup, pd->dev,
+ "Failed to Initialize rx dma ring\n");
result = -ENOMEM;
goto out_free_tx_ring_2;
}
result = smsc9420_mii_init(dev);
if (result) {
- smsc_warn(IFUP, "Failed to initialize Phy");
+ netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n");
result = -ENODEV;
goto out_free_rx_ring_3;
}
@@ -1547,7 +1543,8 @@ static int smsc9420_resume(struct pci_dev *pdev)
err = pci_enable_wake(pdev, 0, 0);
if (err)
- smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
+ netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
+ err);
if (netif_running(dev)) {
/* FIXME: gross. It looks like ancient PM relic.*/
@@ -1582,12 +1579,12 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = 0;
u32 id_rev;
- printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");
+ pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION);
/* First do the PCI initialisation */
result = pci_enable_device(pdev);
if (unlikely(result)) {
- printk(KERN_ERR "Cannot enable smsc9420\n");
+ pr_err("Cannot enable smsc9420\n");
goto out_0;
}
@@ -1600,24 +1597,24 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_NETDEV_DEV(dev, &pdev->dev);
if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
- printk(KERN_ERR "Cannot find PCI device base address\n");
+ netdev_err(dev, "Cannot find PCI device base address\n");
goto out_free_netdev_2;
}
if ((pci_request_regions(pdev, DRV_NAME))) {
- printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+ netdev_err(dev, "Cannot obtain PCI resources, aborting\n");
goto out_free_netdev_2;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "No usable DMA configuration, aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting\n");
goto out_free_regions_3;
}
virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
pci_resource_len(pdev, SMSC_BAR));
if (!virt_addr) {
- printk(KERN_ERR "Cannot map device registers, aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting\n");
goto out_free_regions_3;
}
@@ -1646,16 +1643,17 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd->msg_enable = smsc_debug;
pd->rx_csum = true;
- smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr);
+ netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr);
id_rev = smsc9420_reg_read(pd, ID_REV);
switch (id_rev & 0xFFFF0000) {
case 0x94200000:
- smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev);
+ netif_info(pd, probe, pd->dev,
+ "LAN9420 identified, ID_REV=0x%08X\n", id_rev);
break;
default:
- smsc_warn(PROBE, "LAN9420 NOT identified");
- smsc_warn(PROBE, "ID_REV=0x%08X", id_rev);
+ netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n");
+ netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev);
goto out_free_dmadesc_5;
}
@@ -1670,7 +1668,8 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = register_netdev(dev);
if (result) {
- smsc_warn(PROBE, "error %i registering device", result);
+ netif_warn(pd, probe, pd->dev, "error %i registering device\n",
+ result);
goto out_free_dmadesc_5;
}
@@ -1707,8 +1706,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
if (!dev)
return;
- pci_set_drvdata(pdev, NULL);
-
pd = netdev_priv(dev);
unregister_netdev(dev);
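The removed smsc_dbg/smsc_info/smsc_warn wrappers are replaced by the generic
netif_dbg()/netif_info()/netif_warn() helpers from <linux/netdevice.h>, which
gate on the same pd->msg_enable bits but print through netdev_*() so the
interface name is included. A minimal sketch of what a call such as
netif_warn(pd, drv, pd->dev, ...) boils down to:

	/* only emit if the NETIF_MSG_DRV bit is set in msg_enable */
	if (pd->msg_enable & NETIF_MSG_DRV)
		netdev_warn(pd->dev, "MII is busy???\n");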
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7eb8babed2cb..fc94f202a43e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -451,14 +451,14 @@ struct mac_device_info {
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
- unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
extern const struct stmmac_chain_mode_ops chain_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 8e5662ce488b..def266da55db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -104,14 +104,13 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */
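Dropping the extern keyword from these prototypes is purely cosmetic: function
declarations at file scope have external linkage by default, so the two forms
below declare exactly the same symbol:

	extern void dwmac_dma_stop_rx(void __iomem *ioaddr);	/* old style */
	void dwmac_dma_stop_rx(void __iomem *ioaddr);		/* equivalent */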
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 48ec001566b5..8607488cbcfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -128,8 +128,8 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_err_octets;
};
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f16a9bdf45bb..22f89ffdfd95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -110,14 +110,14 @@ struct stmmac_priv {
extern int phyaddr;
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 023b7c29cb2f..644d80ece067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(ndev);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, priv->ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 759441b29e53..b4d50d74ba18 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
- memcpy(dev_addr, addr, 6);
+ memcpy(dev_addr, addr, ETH_ALEN);
goto done;
}
#endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return -ENODEV;
}
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f28460ce24a7..388540fcb977 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9875,7 +9875,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return err;
}
@@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e62df2b81302..b5655b79bd3b 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
@@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev)
iounmap(gp->regs);
pci_release_regions(pdev);
free_netdev(dev);
-
- pci_set_drvdata(pdev, NULL);
}
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index e37b587b3860..0dbf46f08ed5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
- if (qfe_slot != -1 && addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (qfe_slot != -1 && addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
}
#else
get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
pci_release_regions(hp->happy_dev);
free_netdev(net_dev);
-
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b072f4dba033..5695ae2411de 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 571452e786d5..dd0dd6279b4e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2447,7 +2447,6 @@ static void bdx_remove(struct pci_dev *pdev)
iounmap(nic->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
vfree(nic);
RET();
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de71b1ec4625..53150c25a96b 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
To compile this driver as a module, choose M here: the module
will be called davinci_cpdma. This is recommended.
+config TI_CPSW_PHY_SEL
+ boolean "TI CPSW Switch Phy sel Support"
+ depends on TI_CPSW
+ ---help---
+ This driver supports configuring the PHY interface mode (MII, RMII or
+ RGMII) of the PHYs connected to the CPSW.
+
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
+ select TI_CPSW_PHY_SEL
---help---
This driver supports TI's CPSW Ethernet Switch.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index c65148e8aa1d..9cfaab8152be 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 000000000000..148da9ae8366
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+
+struct cpsw_phy_sel_priv {
+ struct device *dev;
+ u32 __iomem *gmii_sel;
+ bool rmii_clock_external;
+ void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mode <<= slave * 2;
+
+ if (priv->rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+ struct device_node *node = (struct device_node *)data;
+ return dev->of_node == node &&
+ dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+ struct device_node *node;
+ struct cpsw_phy_sel_priv *priv;
+
+ node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ if (!node) {
+ dev_err(dev, "Phy mode driver DT not found\n");
+ return;
+ }
+
+ dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ priv = dev_get_drvdata(dev);
+
+ priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+ {
+ .compatible = "ti,am3352-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct of_device_id *of_id;
+ struct cpsw_phy_sel_priv *priv;
+
+ of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+ return -ENOMEM;
+ }
+
+ priv->cpsw_phy_sel = of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+ priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->gmii_sel))
+ return PTR_ERR(priv->gmii_sel);
+
+ if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+ priv->rmii_clock_external = true;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+ .probe = cpsw_phy_sel_probe,
+ .driver = {
+ .name = "cpsw-phy-sel",
+ .owner = THIS_MODULE,
+ .of_match_table = cpsw_phy_sel_id_table,
+ },
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
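For reference, the per-slave field layout that cpsw_gmii_sel_am3352() programs into the GMII_SEL register is: a 2-bit interface-mode field at bits [2n+1:2n] for slave n, plus a per-slave RMII reference-clock-direction bit at bit (n + 6). A minimal, userspace-style sketch (illustration only, not part of the driver) that reproduces the mask/mode arithmetic for slave 1 in RMII mode with an externally driven reference clock:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)				(1u << (n))
	#define AM33XX_GMII_SEL_MODE_RMII	1
	#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)

	int main(void)
	{
		int slave = 1;
		uint32_t mask = 0x3 << (slave * 2) | BIT(slave + 6);		/* 0x8c */
		uint32_t mode = AM33XX_GMII_SEL_MODE_RMII << (slave * 2);	/* 0x04 */

		mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;			/* 0x84 */
		printf("mask=0x%02x mode=0x%02x\n", mask, mode);
		return 0;
	}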
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cc3ce557e4aa..90d41d26ec6d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,8 +367,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct resource *cpsw_res;
- struct resource *cpsw_wr_res;
struct napi_struct napi;
struct device *dev;
struct cpsw_platform_data data;
@@ -1016,6 +1014,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
dev_info(priv->dev, "phy found : id is : 0x%x\n",
slave->phy->phy_id);
phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+ slave->slave_num);
}
}
@@ -1705,62 +1707,55 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (of_property_read_u32(node, "active_slave", &prop)) {
pr_err("Missing active_slave property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
pr_err("Missing cpts_clock_shift property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->cpts_clock_shift = prop;
- data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
- GFP_KERNEL);
+ data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+ * sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
if (!data->slave_data)
- return -EINVAL;
+ return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
pr_err("Missing ale_entries property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
pr_err("Missing bd_ram_size property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
pr_err("Missing rx_descs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
pr_err("Missing mac_control property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
data->mac_control = prop;
@@ -1791,8 +1786,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
- ret = -EINVAL;
- goto error_ret;
+ return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
@@ -1822,10 +1816,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
return 0;
-
-error_ret:
- kfree(data->slave_data);
- return ret;
}
static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1867,7 +1857,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->coal_intvl = 0;
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
- priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1911,8 +1900,8 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpsw_priv *priv;
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
- void __iomem *ss_regs, *wr_regs;
- struct resource *res;
+ void __iomem *ss_regs;
+ struct resource *res, *ss_res;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i, k = 0;
@@ -1948,7 +1937,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (cpsw_probe_dt(&priv->data, pdev)) {
pr_err("cpsw: platform data missing\n");
ret = -ENODEV;
- goto clean_ndev_ret;
+ goto clean_runtime_disable_ret;
}
data = &priv->data;
@@ -1962,11 +1951,12 @@ static int cpsw_probe(struct platform_device *pdev)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
- priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
- GFP_KERNEL);
+ priv->slaves = devm_kzalloc(&pdev->dev,
+ sizeof(struct cpsw_slave) * data->slaves,
+ GFP_KERNEL);
if (!priv->slaves) {
- ret = -EBUSY;
- goto clean_ndev_ret;
+ ret = -ENOMEM;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
@@ -1974,55 +1964,31 @@ static int cpsw_probe(struct platform_device *pdev)
priv->slaves[0].ndev = ndev;
priv->emac_port = 0;
- priv->clk = clk_get(&pdev->dev, "fck");
+ priv->clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "fck is not found\n");
+ dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_slave_ret;
+ goto clean_runtime_disable_ret;
}
priv->coal_intvl = 0;
priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
- priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!priv->cpsw_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_clk_ret;
- }
- if (!request_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_clk_ret;
- }
- ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
- if (!ss_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_iores_ret;
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
priv->version = __raw_readl(&priv->regs->id_ver);
priv->host_port = HOST_PORT_NUM;
- priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!priv->cpsw_wr_res) {
- dev_err(priv->dev, "error getting i/o resource\n");
- ret = -ENOENT;
- goto clean_iomap_ret;
- }
- if (!request_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res), ndev->name)) {
- dev_err(priv->dev, "failed request i/o region\n");
- ret = -ENXIO;
- goto clean_iomap_ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->wr_regs)) {
+ ret = PTR_ERR(priv->wr_regs);
+ goto clean_runtime_disable_ret;
}
- wr_regs = ioremap(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
- if (!wr_regs) {
- dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_wr_iores_ret;
- }
- priv->wr_regs = wr_regs;
memset(&dma_params, 0, sizeof(dma_params));
memset(&ale_params, 0, sizeof(ale_params));
@@ -2053,12 +2019,12 @@ static int cpsw_probe(struct platform_device *pdev)
slave_size = CPSW2_SLAVE_SIZE;
sliver_offset = CPSW2_SLIVER_OFFSET;
dma_params.desc_mem_phys =
- (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+ (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
break;
default:
dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
ret = -ENODEV;
- goto clean_cpsw_wr_iores_ret;
+ goto clean_runtime_disable_ret;
}
for (i = 0; i < priv->data.slaves; i++) {
struct cpsw_slave *slave = &priv->slaves[i];
@@ -2086,7 +2052,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!priv->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_wr_iomap_ret;
+ goto clean_runtime_disable_ret;
}
priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2121,8 +2087,8 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, cpsw_interrupt, 0,
- dev_name(&pdev->dev), priv)) {
+ if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+ dev_name(priv->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2144,7 +2110,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
if (cpts_register(&pdev->dev, priv->cpts,
@@ -2152,44 +2118,27 @@ static int cpsw_probe(struct platform_device *pdev)
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
- priv->cpsw_res->start, ndev->irq);
+ ss_res->start, ndev->irq);
if (priv->data.dual_emac) {
ret = cpsw_probe_dual_emac(pdev, priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_irq_ret;
+ goto clean_ale_ret;
}
}
return 0;
-clean_irq_ret:
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
- iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
- iounmap(priv->regs);
-clean_cpsw_iores_ret:
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
-clean_clk_ret:
- clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
- kfree(priv->slaves);
clean_ndev_ret:
- kfree(priv->data.slave_data);
free_netdev(priv->ndev);
return ret;
}
@@ -2198,30 +2147,18 @@ static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- int i;
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
cpts_unregister(priv->cpts);
- for (i = 0; i < priv->num_irqs; i++)
- free_irq(priv->irqs_table[i], priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
- iounmap(priv->regs);
- release_mem_region(priv->cpsw_res->start,
- resource_size(priv->cpsw_res));
- iounmap(priv->wr_regs);
- release_mem_region(priv->cpsw_wr_res->start,
- resource_size(priv->cpsw_wr_res));
pm_runtime_disable(&pdev->dev);
- clk_put(priv->clk);
- kfree(priv->slaves);
- kfree(priv->data.slave_data);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
@@ -2277,7 +2214,7 @@ static struct platform_driver cpsw_driver = {
.name = "cpsw",
.owner = THIS_MODULE,
.pm = &cpsw_pm_ops,
- .of_match_table = of_match_ptr(cpsw_of_mtable),
+ .of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
.remove = cpsw_remove,
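The bulk of the cpsw_probe() changes above replace hand-rolled request_mem_region()/ioremap()/request_irq()/clk_get() calls, and their matching error-unwind labels, with the devm_* managed variants, which the driver core releases automatically on probe failure or device unbind. A minimal sketch of that pattern, assuming a generic platform driver (example_probe and the "fck" clock name are illustrative, not additional cpsw code):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		struct clk *fck;

		fck = devm_clk_get(&pdev->dev, "fck");
		if (IS_ERR(fck))
			return PTR_ERR(fck);	/* nothing to unwind by hand */

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);	/* clock reference released automatically */

		return 0;
	}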
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index eb3e101ec048..574f49da693f 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
bool dual_emac; /* Enable Dual EMAC mode */
};
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fe993cdd7e23..1a581ef7eee8 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -127,8 +127,8 @@ struct cpts {
};
#ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
#else
static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
}
#endif
-extern int cpts_register(struct device *dev, struct cpts *cpts,
- u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a32ef9d63ae..41ba974bf37c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1852,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
- memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 591437e59b90..62b19be5183d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev)
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
cancel_work_sync(&priv->tlan_tqueue);
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 13e6fff8ca23..628b736e5ae7 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
nz_addr |= mac[i];
if (nz_addr) {
- memcpy(dev->dev_addr, mac, 6);
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
dev->addr_len = 6;
} else {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 309abb472aa2..8505196be9f5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
}
#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
#else
static inline void udbg_shutdown_ps3gelic(void) {}
#endif
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
- struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index f7e51b7d7049..11f443d8e4ea 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5734480c1ecf..3f4a32e39d27 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2478,7 +2478,6 @@ out_release_regions:
pci_release_regions(pdev);
out_disable_dev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return NULL;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index 4ba2135474d1..9b6af0845a11 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -29,8 +29,8 @@
#include <linux/sungem_phy.h>
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
extern const struct ethtool_ops spider_net_ethtool_ops;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index a971b9cca564..1322546d92ac 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev)
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
free_netdev(dev);
- pci_set_drvdata(pdev, NULL);
}
static int
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index bdf697b184ae..4a7293ed95e9 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2292,7 +2292,6 @@ static void rhine_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static void rhine_shutdown (struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4c619ea5189f..74234a51c851 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -31,7 +31,7 @@
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
@@ -63,13 +63,13 @@
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
/* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
+#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
/* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
-#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
* in the HW spec */
@@ -77,21 +77,21 @@
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
/* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
-#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
/* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
/* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
-#define XEL_HEADER_OFFSET 12 /* Offset to length field */
-#define XEL_HEADER_SHIFT 16 /* Shift value for length */
+#define XEL_HEADER_OFFSET 12 /* Offset to length field */
+#define XEL_HEADER_SHIFT 16 /* Shift value for length */
/* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 * This function unmaps the IO region of the Emaclite device and frees the net
* device.
*/
-static void xemaclite_remove_ndev(struct net_device *ndev,
- struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = netdev_priv(ndev);
-
- if (lp->base_addr)
- devm_iounmap(&pdev->dev, lp->base_addr);
free_netdev(ndev);
}
}
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (mac_address)
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, 6);
+ memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
else
dev_warn(dev, "No MAC address found\n");
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
return 0;
error:
- xemaclite_remove_ndev(ndev, ofdev);
+ xemaclite_remove_ndev(ndev);
return rc;
}
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- xemaclite_remove_ndev(ndev, of_dev);
+ xemaclite_remove_ndev(ndev);
return 0;
}
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index a20ed1a98099..f83993590174 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
*/
* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
a[1] = 0 ;
- memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+ memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
CHECK_NPP() ;
/* set memory address reg for writes */
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3ca308b28214..bd1166bf8f61 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -469,20 +469,20 @@ struct s_smc {
extern const struct fddi_addr fddi_broadcast;
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
#endif /* _SCMECM_ */
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index f5d7305a5784..713d303a06a9 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -436,7 +436,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
smt_reset_defaults(smc, 0);
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+ memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
if ((unsigned short) frame[1 + 10] != 0)
return;
SRBit = frame[1 + 6] & 0x01;
- memcpy(&frame[1 + 6], hw_addr, 6);
+ memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
frame[8] |= SRBit;
} // CheckSourceAddress
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index a974727dd9a2..636b65c66d49 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser_fdx", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index e349d867449b..f9a8976195ba 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser12", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index bc1d52170389..4bc6ee8e7987 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
{
if (request_irq(hwcfg.irq, scc_isr,
- IRQF_DISABLED, "AX.25 SCC",
+ 0, "AX.25 SCC",
(void *)(long) hwcfg.irq))
printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
else
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5af1c3e5032a..1971411574db 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
goto out_release_base;
}
outb(0, IER(dev->base_addr));
- if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
ret = -EBUSY;
goto out_release_base;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c74f384c87d5..303c4bd26e17 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
#else
- if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+ if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
return -EBUSY;
}
- if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+ if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
free_irq(port->irq, dev);
return -EBUSY;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 31bcb98ef356..768dfe9a9315 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
return 0;
rc = request_irq (self->io.irq, toshoboe_interrupt,
- IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+ IRQF_SHARED, dev->name, self);
if (rc)
return rc;
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
self->io.fir_base = self->base;
self->io.fir_ext = OBOE_IO_EXTENT;
self->io.irq = pci_dev->irq;
- self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+ self->io.irqflags = IRQF_SHARED;
self->speed = self->io.speed = 9600;
self->async = 0;
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4455425f1c77..ff45cd0d60e8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+ err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 89682b49900f..8d9ae5a086d5 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
- err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 6d5b1e2b1289..f50b9c1c0639 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -102,28 +102,29 @@ struct sir_driver {
/* exported */
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+ const char *name);
+int sirdev_put_instance(struct sir_dev *self);
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
/* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
/* not exported */
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
/* inline helpers */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9bf46bd19b87..af4aaa5893ff 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -297,7 +297,13 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
int ret;
const struct macvlan_dev *vlan = netdev_priv(dev);
- ret = macvlan_queue_xmit(skb, dev);
+ if (vlan->fwd_priv) {
+ skb->dev = vlan->lowerdev;
+ ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+ } else {
+ ret = macvlan_queue_xmit(skb, dev);
+ }
+
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct macvlan_pcpu_stats *pcpu_stats;
@@ -347,6 +353,21 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ vlan->fwd_priv =
+ lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+		/* If we get a NULL pointer back, or if we get an error,
+		 * then we should just fall through to the non-accelerated path
+ */
+ if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
+ vlan->fwd_priv = NULL;
+ } else {
+ dev->features &= ~NETIF_F_LLTX;
+ return 0;
+ }
+ }
+
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -367,6 +388,11 @@ hash_add:
del_unicast:
dev_uc_del(lowerdev, dev->dev_addr);
out:
+ if (vlan->fwd_priv) {
+ lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+ vlan->fwd_priv);
+ vlan->fwd_priv = NULL;
+ }
return err;
}
@@ -375,6 +401,13 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ if (vlan->fwd_priv) {
+ lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+ vlan->fwd_priv);
+ vlan->fwd_priv = NULL;
+ return 0;
+ }
+
dev_uc_unsync(lowerdev, dev);
dev_mc_unsync(lowerdev, dev);
@@ -828,22 +861,22 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_inherit(dev, lowerdev);
}
+ port->count += 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto destroy_port;
+
+ dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
goto destroy_port;
- port->count += 1;
- err = register_netdevice(dev);
- if (err < 0)
- goto upper_dev_unlink;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
-upper_dev_unlink:
- netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
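The macvlan_open()/macvlan_stop() hunks above consume the new L2 forwarding offload hooks: when the lower device advertises NETIF_F_HW_L2FW_DOFFLOAD, macvlan asks it for a per-station handle via ndo_dfwd_add_station() and later hands that handle to dev_hard_start_xmit() on transmit and to ndo_dfwd_del_station() on teardown. For context, a hypothetical sketch of the lower-device side (the example_* names and per-station fields are assumptions, not any real driver's implementation):

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/slab.h>

	struct example_fwd_station {
		struct net_device *vdev;	/* the offloaded macvlan */
		int hw_queue;			/* assumed per-station HW resource */
	};

	static void *example_dfwd_add_station(struct net_device *pdev,
					      struct net_device *vdev)
	{
		struct example_fwd_station *st;

		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return ERR_PTR(-ENOMEM);	/* macvlan falls back to the SW path */

		st->vdev = vdev;
		st->hw_queue = 0;	/* placeholder: assign a dedicated HW queue */
		return st;		/* stored by macvlan in vlan->fwd_priv */
	}

	static void example_dfwd_del_station(struct net_device *pdev, void *priv)
	{
		kfree(priv);		/* release the per-station resources */
	}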
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index c9a15925a1f7..ba2f5e710af1 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -34,6 +34,8 @@
*
****************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -320,8 +322,8 @@ static ssize_t store_enabled(struct netconsole_target *nt,
if (enabled < 0 || enabled > 1)
return -EINVAL;
if (enabled == nt->enabled) {
- printk(KERN_INFO "netconsole: network logging has already %s\n",
- nt->enabled ? "started" : "stopped");
+ pr_info("network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
return -EINVAL;
}
@@ -336,7 +338,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
if (err)
return err;
- printk(KERN_INFO "netconsole: network logging started\n");
+		pr_info("network logging started\n");
} else { /* 0 */
/* We need to disable the netconsole before cleaning it up
* otherwise we might end up in write_msg() with
@@ -360,9 +362,8 @@ static ssize_t store_dev_name(struct netconsole_target *nt,
size_t len;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -383,9 +384,8 @@ static ssize_t store_local_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -402,9 +402,8 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
int rv;
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -419,9 +418,8 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -429,7 +427,7 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -450,9 +448,8 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
size_t count)
{
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -460,7 +457,7 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
const char *end;
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
- printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ pr_err("invalid IPv6 address at: <%c>\n", *end);
return -EINVAL;
}
nt->np.ipv6 = true;
@@ -483,9 +480,8 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
u8 remote_mac[ETH_ALEN];
if (nt->enabled) {
- printk(KERN_ERR "netconsole: target (%s) is enabled, "
- "disable to update parameters\n",
- config_item_name(&nt->item));
+ pr_err("target (%s) is enabled, disable to update parameters\n",
+ config_item_name(&nt->item));
return -EINVAL;
}
@@ -708,19 +704,20 @@ restart:
}
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
- printk(KERN_INFO "netconsole: network logging stopped on "
- "interface %s as it ", dev->name);
+ const char *msg = "had an event";
switch (event) {
case NETDEV_UNREGISTER:
- printk(KERN_CONT "unregistered\n");
+ msg = "unregistered";
break;
case NETDEV_RELEASE:
- printk(KERN_CONT "released slaves\n");
+ msg = "released slaves";
break;
case NETDEV_JOIN:
- printk(KERN_CONT "is joining a master device\n");
+ msg = "is joining a master device";
break;
}
+ pr_info("network logging stopped on interface %s as it %s\n",
+ dev->name, msg);
}
done:
@@ -806,7 +803,7 @@ static int __init init_netconsole(void)
goto undonotifier;
register_console(&netconsole);
- printk(KERN_INFO "netconsole: network logging started\n");
+ pr_info("network logging started\n");
return err;
@@ -814,7 +811,7 @@ undonotifier:
unregister_netdevice_notifier(&netconsole_netdev_notifier);
fail:
- printk(KERN_ERR "netconsole: cleaning up\n");
+ pr_err("cleaning up\n");
/*
* Remove all targets and destroy them (only targets created
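The conversions above rely on the pr_fmt() convention: once the macro is defined before the first kernel header include, every pr_err()/pr_info() in the file has the module-name prefix prepended to its format string, which is why the literal "netconsole: " prefixes disappear from the individual messages. A minimal sketch of the mechanism (a standalone example module, not netconsole itself):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		pr_info("network logging started\n");
		/* printed as "example: network logging started" for example.ko */
		return 0;
	}
	module_init(example_init);

	MODULE_LICENSE("GPL");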
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 342561ad3158..9b5d46c03eed 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -154,6 +154,13 @@ config MDIO_SUN4I
interface units of the Allwinner SoC that have an EMAC (A10,
A12, A10s, etc.)
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART
+ help
+ This driver supports the MDIO interface found in the network
+	  interface units of the MOXA ART SoC.
+
config MDIO_BUS_MUX
tristate
depends on OF_MDIO
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 23a2ab2e847e..9013dfa12aa3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ac22283aaf23..bc71947b1ec3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts |= WAKE_MAGIC;
}
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_WOL_ENABLE;
+
+ value = phy_read(phydev, MII_BMCR);
+
+ if (wol_enabled)
+ value |= BMCR_ISOLATE;
+ else
+ value |= BMCR_PDOWN;
+
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ int value;
+
+ mutex_lock(&phydev->lock);
+
+ value = phy_read(phydev, MII_BMCR);
+ value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+ phy_write(phydev, MII_BMCR, value);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e91477362d4..2e3c778ea9bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -34,9 +34,9 @@
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define MII_MARVELL_PHY_PAGE 22
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
new file mode 100644
index 000000000000..a5741cb0304e
--- /dev/null
+++ b/drivers/net/phy/mdio-moxart.c
@@ -0,0 +1,201 @@
+/* MOXA ART Ethernet (RTL8201CP) MDIO interface driver
+ *
+ * Copyright (C) 2013 Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define REG_PHY_CTRL 0
+#define REG_PHY_WRITE_DATA 4
+
+/* REG_PHY_CTRL */
+#define MIIWR BIT(27) /* init write sequence (auto cleared)*/
+#define MIIRD BIT(26)
+#define REGAD_MASK 0x3e00000
+#define PHYAD_MASK 0x1f0000
+#define MIIRDATA_MASK 0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK 0xffff
+
+struct moxart_mdio_data {
+ void __iomem *base;
+};
+
+static int moxart_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct moxart_mdio_data *data = bus->priv;
+ u32 ctrl = 0;
+ unsigned int count = 5;
+
+ dev_dbg(&bus->dev, "%s\n", __func__);
+
+ ctrl |= MIIRD | ((mii_id << 16) & PHYAD_MASK) |
+ ((regnum << 21) & REGAD_MASK);
+
+ writel(ctrl, data->base + REG_PHY_CTRL);
+
+ do {
+ ctrl = readl(data->base + REG_PHY_CTRL);
+
+ if (!(ctrl & MIIRD))
+ return ctrl & MIIRDATA_MASK;
+
+ mdelay(10);
+ count--;
+ } while (count > 0);
+
+ dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int moxart_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct moxart_mdio_data *data = bus->priv;
+ u32 ctrl = 0;
+ unsigned int count = 5;
+
+ dev_dbg(&bus->dev, "%s\n", __func__);
+
+ ctrl |= MIIWR | ((mii_id << 16) & PHYAD_MASK) |
+ ((regnum << 21) & REGAD_MASK);
+
+ value &= MIIWDATA_MASK;
+
+ writel(value, data->base + REG_PHY_WRITE_DATA);
+ writel(ctrl, data->base + REG_PHY_CTRL);
+
+ do {
+ ctrl = readl(data->base + REG_PHY_CTRL);
+
+ if (!(ctrl & MIIWR))
+ return 0;
+
+ mdelay(10);
+ count--;
+ } while (count > 0);
+
+ dev_dbg(&bus->dev, "%s timed out\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int moxart_mdio_reset(struct mii_bus *bus)
+{
+ int data, i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ data = moxart_mdio_read(bus, i, MII_BMCR);
+ if (data < 0)
+ continue;
+
+ data |= BMCR_RESET;
+ if (moxart_mdio_write(bus, i, MII_BMCR, data) < 0)
+ continue;
+ }
+
+ return 0;
+}
+
+static int moxart_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct moxart_mdio_data *data;
+ struct resource *res;
+ int ret, i;
+
+ bus = mdiobus_alloc_size(sizeof(*data));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "MOXA ART Ethernet MII";
+ bus->read = &moxart_mdio_read;
+ bus->write = &moxart_mdio_write;
+ bus->reset = &moxart_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
+ if (!bus->irq) {
+ ret = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+	/* Set PHY_IGNORE_INTERRUPT here even though it has no effect:
+	 * of_mdiobus_register() resets these to PHY_POLL.
+	 * Ideally, the interrupt from the MAC controller could be used to
+	 * detect link state changes instead of polling, i.e. if there were
+	 * a way for a phy_driver to set PHY_HAS_INTERRUPT but have that
+	 * interrupt handled in the ethernet driver code.
+ */
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_IGNORE_INTERRUPT;
+
+ data = bus->priv;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->base)) {
+ ret = PTR_ERR(data->base);
+ goto err_out_free_mdiobus;
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0)
+ goto err_out_free_mdiobus;
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_out_free_mdiobus:
+ mdiobus_free(bus);
+ return ret;
+}
+
+static int moxart_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_mdio_dt_ids[] = {
+ { .compatible = "moxa,moxart-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids);
+
+static struct platform_driver moxart_mdio_driver = {
+ .probe = moxart_mdio_probe,
+ .remove = moxart_mdio_remove,
+ .driver = {
+ .name = "moxart-mdio",
+ .of_match_table = moxart_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(moxart_mdio_driver);
+
+MODULE_DESCRIPTION("MOXA ART MDIO interface driver");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_LICENSE("GPL");
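For reference, the control word built by moxart_mdio_read()/moxart_mdio_write() places the PHY address in bits 20:16, the register number in bits 25:21, and kicks the transfer with MIIRD (bit 26) or MIIWR (bit 27), after which the driver polls for the kick bit to self-clear. A small, userspace-style sketch (illustration only) computing the word for a read of register 1 (BMSR) on the PHY at address 2:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)		(1u << (n))
	#define MIIRD		BIT(26)
	#define REGAD_MASK	0x3e00000
	#define PHYAD_MASK	0x1f0000

	int main(void)
	{
		int mii_id = 2, regnum = 1;
		uint32_t ctrl = MIIRD | ((mii_id << 16) & PHYAD_MASK) |
				((regnum << 21) & REGAD_MASK);

		printf("ctrl=0x%08x\n", ctrl);	/* 0x04220000 */
		return 0;
	}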
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c31aad0004cb..3ae28f420868 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
}, {
.phy_id = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
} };
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 138de837977f..fa1d69a38ccf 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,6 +64,18 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
return err;
}
+/* RTL8201CP */
+static struct phy_driver rtl8201cp_driver = {
+ .phy_id = 0x00008201,
+ .name = "RTL8201CP Ethernet",
+ .phy_id_mask = 0x0000ffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
/* RTL8211B */
static struct phy_driver rtl8211b_driver = {
.phy_id = 0x001cc912,
@@ -98,6 +110,9 @@ static int __init realtek_init(void)
{
int ret;
+ ret = phy_driver_register(&rtl8201cp_driver);
+ if (ret < 0)
+ return -ENODEV;
ret = phy_driver_register(&rtl8211b_driver);
if (ret < 0)
return -ENODEV;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 1f7bef90b467..7b4ff35c8bf7 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa) {
- memcpy(eth->h_source, dev->dev_addr, 6);
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 40db31233313..85e4a01670f0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -242,6 +242,21 @@ config USB_NET_CDC_NCM
* ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
* Ericsson F5521gw Mobile Broadband Module
+config USB_NET_HUAWEI_CDC_NCM
+ tristate "Huawei NCM embedded AT channel support"
+ depends on USB_USBNET
+ select USB_WDM
+ select USB_NET_CDC_NCM
+ help
+	  This driver supports Huawei-style NCM devices that use NCM as a
+	  transport for other protocols, usually an embedded AT channel.
+ Good examples are:
+ * Huawei E3131
+ * Huawei E3251
+
+ To compile this driver as a module, choose M here: the module will be
+ called huawei_cdc_ncm.ko.
+
config USB_NET_CDC_MBIM
tristate "CDC MBIM support"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 8b342cf992fd..b17b5e88bbaf 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
+obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2d8e33..df507e6dbb9c 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
memset(catc->multicast, 0, 64);
catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *netdev;
struct catc *catc;
- u8 broadcast[6];
+ u8 broadcast[ETH_ALEN];
int i, pktsz;
if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "Filling the multicast list.\n");
- memset(broadcast, 0xff, 6);
+ memset(broadcast, 0xff, ETH_ALEN);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 7d78669000d7..6358d420e185 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
static struct usb_driver usbpn_driver;
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
static const char ifname[] = "usbpn%d";
const struct usb_cdc_union_desc *union_header = NULL;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 25ba7eca9a13..c9f3281506af 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,8 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
#include <linux/usb/cdc_ncm.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
/* driver specific data - must match cdc_ncm usage */
struct cdc_mbim_state {
@@ -42,13 +44,11 @@ static int cdc_mbim_manage_power(struct usbnet *dev, int on)
if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
/* need autopm_get/put here to ensure the usbcore sees the new value */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
@@ -173,7 +173,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
}
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -184,6 +184,60 @@ error:
return NULL;
}
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies. The IPv6 core will not
+ * respond since IFF_NOARP is set, so we must handle them ourselves.
+ */
+static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+ struct net_device *netdev;
+ struct inet6_dev *in6_dev;
+ bool is_router;
+
+ /* we'll only respond to requests from unicast addresses to
+ * our solicited node addresses.
+ */
+ if (!ipv6_addr_is_solict_mult(&iph->daddr) ||
+ !(ipv6_addr_type(&iph->saddr) & IPV6_ADDR_UNICAST))
+ return;
+
+ /* need to send the NA on the VLAN dev, if any */
+ if (tci)
+ netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
+ tci);
+ else
+ netdev = dev->net;
+ if (!netdev)
+ return;
+
+ in6_dev = in6_dev_get(netdev);
+ if (!in6_dev)
+ return;
+ is_router = !!in6_dev->cnf.forwarding;
+ in6_dev_put(in6_dev);
+
+ /* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
+ ipv6_stub->ndisc_send_na(netdev, NULL, &iph->saddr, &msg->target,
+ is_router /* router */,
+ true /* solicited */,
+ false /* override */,
+ true /* inc_opt */);
+}
+
+static bool is_neigh_solicit(u8 *buf, size_t len)
+{
+ struct ipv6hdr *iph = (void *)buf;
+ struct nd_msg *msg = (void *)(iph + 1);
+
+ return (len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+ iph->nexthdr == IPPROTO_ICMPV6 &&
+ msg->icmph.icmp6_code == 0 &&
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION);
+}
+
+
static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
{
__be16 proto = htons(ETH_P_802_3);
@@ -198,6 +252,8 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
proto = htons(ETH_P_IP);
break;
case 0x60:
+ if (is_neigh_solicit(buf, len))
+ do_neigh_solicit(dev, buf, tci);
proto = htons(ETH_P_IPV6);
break;
default:
@@ -313,15 +369,13 @@ error:
static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
{
- int ret = 0;
+ int ret = -ENODEV;
struct usbnet *dev = usb_get_intfdata(intf);
struct cdc_mbim_state *info = (void *)&dev->data;
struct cdc_ncm_ctx *ctx = info->ctx;
- if (ctx == NULL) {
- ret = -1;
+ if (!ctx)
goto error;
- }
/*
* Both usbnet_suspend() and subdriver->suspend() MUST return 0
@@ -354,7 +408,7 @@ static int cdc_mbim_resume(struct usb_interface *intf)
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -371,9 +425,18 @@ static const struct driver_info cdc_mbim_info = {
};
/* MBIM and NCM devices should not need a ZLP after NTBs with
- * dwNtbOutMaxSize length. This driver_info is for the exceptional
- * devices requiring it anyway, allowing them to be supported without
- * forcing the performance penalty on all the sane devices.
+ * dwNtbOutMaxSize length. Nevertheless, a number of devices from
+ * different vendor IDs will fail unless we send ZLPs, forcing us
+ * to make this the default.
+ *
+ * This default may cause a performance penalty for spec conforming
+ * devices wanting to take advantage of optimizations possible without
+ * ZLPs. A whitelist is added in an attempt to avoid this for devices
+ * known to conform to the MBIM specification.
+ *
+ * All known devices supporting NCM compatibility mode are also
+ * conforming to the NCM and MBIM specifications. For this reason, the
+ * NCM subclass entry is also in the ZLP whitelist.
*/
static const struct driver_info cdc_mbim_info_zlp = {
.description = "CDC MBIM",
@@ -396,16 +459,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Sierra Wireless MC7710 need ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
- },
- /* HP hs2434 Mobile Broadband Module needs ZLPs */
- { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ /* ZLP conformance whitelist: All Ericsson MBIM devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_mbim_info,
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
},
{
},
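The ZLP default above hinges on a USB bulk-transfer detail: a transfer only terminates early when the host sees a packet shorter than wMaxPacketSize, so an NTB whose length is an exact multiple of the packet size needs either a trailing zero-length packet or an extra pad byte to end on a short packet. The standalone sketch below (not part of the patch; the helper name and the example sizes are made up for illustration) shows the arithmetic that the tx_max padding in cdc_ncm_setup() and the short-packet logic in cdc_ncm_fill_tx_frame() rely on.

/* Illustration only -- not patch code. */
#include <stdbool.h>
#include <stdio.h>

/* hypothetical helper name, chosen for this sketch */
static bool ntb_needs_zlp(size_t ntb_len, unsigned int wMaxPacketSize)
{
	/* a transfer ends early only on a short packet, so an exact
	 * multiple of wMaxPacketSize needs a ZLP (or one pad byte)
	 */
	return ntb_len != 0 && (ntb_len % wMaxPacketSize) == 0;
}

int main(void)
{
	unsigned int mps = 512;	/* typical high-speed bulk wMaxPacketSize */
	size_t tx_max = 16384;	/* example dwNtbOutMaxSize */

	/* 16384 = 32 * 512, so a maximal NTB ends on a packet boundary */
	printf("tx_max=%zu needs ZLP: %d\n", tx_max, ntb_needs_zlp(tx_max, mps));

	/* the patch bumps tx_max by one byte in cdc_ncm_setup() so a
	 * "full" NTB is no longer a multiple of wMaxPacketSize
	 */
	tx_max++;
	printf("tx_max=%zu needs ZLP: %d\n", tx_max, ntb_needs_zlp(tx_max, mps));
	return 0;
}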
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 43afde8f48d2..f74786aa37be 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,8 +53,6 @@
#include <linux/usb/cdc.h>
#include <linux/usb/cdc_ncm.h>
-#define DRIVER_VERSION "14-Mar-2012"
-
#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
static bool prefer_mbim = true;
#else
@@ -68,71 +66,67 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
-static void
-cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- struct usbnet *dev = netdev_priv(net);
-
- strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, dev->driver_info->description,
- sizeof(info->fw_version));
- usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+static u8 cdc_ncm_setup(struct usbnet *dev)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
int err;
int eth_hlen;
u16 ntb_fmt_supported;
- u32 min_dgram_size;
- u32 min_hdr_size;
- struct usbnet *dev = netdev_priv(ctx->netdev);
+ __le16 max_datagram_size;
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ctx->ncm_parm,
- sizeof(ctx->ncm_parm));
+ 0, iface_no, &ncm_parm,
+ sizeof(ncm_parm));
if (err < 0) {
- pr_debug("failed GET_NTB_PARAMETERS\n");
- return 1;
+ dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+ return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
- eth_hlen = ETH_HLEN;
- min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
- if (ctx->mbim_desc != NULL) {
- flags = ctx->mbim_desc->bmNetworkCapabilities;
+ /* there are some minor differences in NCM and MBIM defaults */
+ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+ if (!ctx->mbim_desc)
+ return -EINVAL;
eth_hlen = 0;
- min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
- min_hdr_size = 0;
- } else if (ctx->func_desc != NULL) {
- flags = ctx->func_desc->bmNetworkCapabilities;
+ flags = ctx->mbim_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
} else {
- flags = 0;
+ if (!ctx->func_desc)
+ return -EINVAL;
+ eth_hlen = ETH_HLEN;
+ flags = ctx->func_desc->bmNetworkCapabilities;
+ ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
}
- pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
- "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
- "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
- ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+ /* common absolute max for NCM and MBIM */
+ if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+
+ dev_dbg(&dev->intf->dev,
+ "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
+ ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
@@ -141,19 +135,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* verify maximum size of received NTB in bytes */
if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
- pr_debug("Using min receive length=%d\n",
- USB_CDC_NCM_NTB_MIN_IN_SIZE);
+ dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
+ USB_CDC_NCM_NTB_MIN_IN_SIZE);
ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
}
if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
- pr_debug("Using default maximum receive length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_RX);
+ dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_RX);
ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -161,16 +155,22 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
| USB_RECIP_INTERFACE,
0, iface_no, &dwNtbInMaxSize, 4);
if (err < 0)
- pr_debug("Setting NTB Input Size failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
}
/* verify maximum size of transmitted NTB in bytes */
- if ((ctx->tx_max <
- (min_hdr_size + min_dgram_size)) ||
- (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
- pr_debug("Using default maximum transmit length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_TX);
+ if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
+ dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+
+ /* Adding a pad byte here simplifies the handling in
+ * cdc_ncm_fill_tx_frame, by making tx_max always
+ * represent the real skb max size.
+ */
+ if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
}
/*
@@ -183,7 +183,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default alignment: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default alignment: 4 bytes\n");
ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
@@ -197,13 +197,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
(val != ((-val) & val)) || (val >= ctx->tx_max)) {
- pr_debug("Using default transmit modulus: 4 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit modulus: 4 bytes\n");
ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
}
/* verify the payload remainder */
if (ctx->tx_remainder >= ctx->tx_modulus) {
- pr_debug("Using default transmit remainder: 0 bytes\n");
+ dev_dbg(&dev->intf->dev, "Using default transmit remainder: 0 bytes\n");
ctx->tx_remainder = 0;
}
@@ -221,7 +221,7 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_CRC_NOT_APPENDED,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting CRC mode off failed\n");
+ dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
@@ -232,69 +232,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
USB_CDC_NCM_NTB16_FORMAT,
iface_no, NULL, 0);
if (err < 0)
- pr_debug("Setting NTB format to 16-bit failed\n");
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
}
- ctx->max_datagram_size = min_dgram_size;
+ /* inform the device about the selected Max Datagram Size */
+ if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+ goto out;
- /* set Max Datagram Size (MTU) */
- if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 max_datagram_size;
- u16 eth_max_sz;
- if (ctx->ether_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- else if (ctx->mbim_desc != NULL)
- eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
- else
- goto max_dgram_err;
-
- err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
- pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
- min_dgram_size);
- } else {
- ctx->max_datagram_size =
- le16_to_cpu(max_datagram_size);
- /* Check Eth descriptor value */
- if (ctx->max_datagram_size > eth_max_sz)
- ctx->max_datagram_size = eth_max_sz;
-
- if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
-
- if (ctx->max_datagram_size < min_dgram_size)
- ctx->max_datagram_size = min_dgram_size;
-
- /* if value changed, update device */
- if (ctx->max_datagram_size !=
- le16_to_cpu(max_datagram_size)) {
- err = usbnet_write_cmd(dev,
- USB_CDC_SET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0,
- iface_no, &max_datagram_size,
- 2);
- if (err < 0)
- pr_debug("SET_MAX_DGRAM_SIZE failed\n");
- }
- }
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
}
-max_dgram_err:
- if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
- ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
+ if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+ goto out;
+
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+out:
+ /* set MTU to max supported by the device if necessary */
+ if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
+ dev->net->mtu = ctx->max_datagram_size - eth_hlen;
return 0;
}
static void
-cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
- struct usb_host_endpoint *e;
+ struct usb_host_endpoint *e, *in = NULL, *out = NULL;
u8 ep;
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
@@ -303,18 +277,18 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->status_ep == NULL)
- ctx->status_ep = e;
+ if (!dev->status)
+ dev->status = e;
}
break;
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(&e->desc)) {
- if (ctx->in_ep == NULL)
- ctx->in_ep = e;
+ if (!in)
+ in = e;
} else {
- if (ctx->out_ep == NULL)
- ctx->out_ep = e;
+ if (!out)
+ out = e;
}
break;
@@ -322,6 +296,14 @@ cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
break;
}
}
+ if (in && !dev->in)
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+ if (out && !dev->out)
+ dev->out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
}
static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
@@ -342,18 +324,9 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}
-static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_drvinfo = cdc_ncm_get_drvinfo,
- .get_link = usbnet_get_link,
- .get_msglevel = usbnet_get_msglevel,
- .set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
- .nway_reset = usbnet_nway_reset,
-};
-
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
{
+ const struct usb_cdc_union_desc *union_desc = NULL;
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
u8 *buf;
@@ -367,23 +340,22 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)ctx;
+ ctx->bh.data = (unsigned long)dev;
ctx->bh.func = cdc_ncm_txpath_bh;
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
- ctx->netdev = dev->net;
/* store ctx pointer in device data field */
dev->data[0] = (unsigned long)ctx;
+ /* only the control interface can be successfully probed */
+ ctx->control = intf;
+
/* get some pointers */
driver = driver_of(intf);
buf = intf->cur_altsetting->extra;
len = intf->cur_altsetting->extralen;
- ctx->udev = dev->udev;
- ctx->intf = intf;
-
/* parse through descriptors associated with control interface */
while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
@@ -392,16 +364,18 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
switch (buf[2]) {
case USB_CDC_UNION_TYPE:
- if (buf[0] < sizeof(*(ctx->union_desc)))
+ if (buf[0] < sizeof(*union_desc))
break;
- ctx->union_desc =
- (const struct usb_cdc_union_desc *)buf;
-
- ctx->control = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bMasterInterface0);
+ union_desc = (const struct usb_cdc_union_desc *)buf;
+ /* the master must be the interface we are probing */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ union_desc->bMasterInterface0) {
+ dev_dbg(&intf->dev, "bogus CDC Union\n");
+ goto error;
+ }
ctx->data = usb_ifnum_to_if(dev->udev,
- ctx->union_desc->bSlaveInterface0);
+ union_desc->bSlaveInterface0);
break;
case USB_CDC_ETHERNET_TYPE:
@@ -410,13 +384,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->ether_desc =
(const struct usb_cdc_ether_desc *)buf;
- dev->hard_mtu =
- le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
- else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
- dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
break;
case USB_CDC_NCM_TYPE:
@@ -444,69 +411,71 @@ advance:
}
/* some buggy devices have an IAD but no CDC Union */
- if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
- ctx->control = intf;
+ if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
}
/* check if we got everything */
- if ((ctx->control == NULL) || (ctx->data == NULL) ||
- ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
+ if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
+ dev_dbg(&intf->dev, "CDC descriptors missing\n");
goto error;
+ }
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to claim data intf\n");
goto error;
+ }
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* reset data interface */
temp = usb_set_interface(dev->udev, iface_no, 0);
- if (temp)
- goto error2;
-
- /* initialize data interface */
- if (cdc_ncm_setup(ctx))
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "set interface failed\n");
goto error2;
+ }
- cdc_ncm_find_endpoints(ctx, ctx->data);
- cdc_ncm_find_endpoints(ctx, ctx->control);
-
- if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
- (ctx->status_ep == NULL))
+ cdc_ncm_find_endpoints(dev, ctx->data);
+ cdc_ncm_find_endpoints(dev, ctx->control);
+ if (!dev->in || !dev->out || !dev->status) {
+ dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
+ }
- dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev)) {
+ dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+ goto error2;
+ }
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
- usb_set_intfdata(ctx->intf, dev);
if (ctx->ether_desc) {
temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
- if (temp)
+ if (temp) {
+ dev_dbg(&intf->dev, "failed to get mac address\n");
goto error2;
- dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+ }
+ dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
-
- dev->in = usb_rcvbulkpipe(dev->udev,
- ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->out = usb_sndbulkpipe(dev->udev,
- ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->status = ctx->status_ep;
+	/* usbnet uses these values for sizing tx/rx queues */
+ dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
- ctx->tx_speed = ctx->rx_speed = 0;
return 0;
error2:
@@ -517,7 +486,7 @@ error2:
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
- dev_info(&dev->udev->dev, "bind() failure\n");
+ dev_info(&intf->dev, "bind() failure\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
@@ -553,7 +522,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
ctx->control = NULL;
}
- usb_set_intfdata(ctx->intf, NULL);
+ usb_set_intfdata(intf, NULL);
cdc_ncm_free(ctx);
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
@@ -662,8 +631,9 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
}
struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
struct sk_buff *skb_out;
@@ -683,11 +653,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* allocate a new OUT skb */
if (!skb_out) {
- skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
+ skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
dev_kfree_skb_any(skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
goto exit_no_skb;
}
@@ -725,12 +695,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
} else {
/* no room for skb - store for later */
if (ctx->tx_rem_skb != NULL) {
dev_kfree_skb_any(ctx->tx_rem_skb);
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
ctx->tx_rem_sign = sign;
@@ -763,7 +733,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
if (skb != NULL) {
dev_kfree_skb_any(skb);
skb = NULL;
- ctx->netdev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
}
ctx->tx_curr_frame_num = n;
@@ -788,19 +758,20 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* variables will be reset at next call */
}
- /*
- * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
- * we send buffers as it is. If we get more data, it would be more
- * efficient for USB HS mobile device with DMA engine to receive a full
- * size NTB, than canceling DMA transfer and receiving a short packet.
+	/* If the collected data size is less than or equal to
+	 * CDC_NCM_MIN_TX_PKT bytes, we send the buffer as is. If we get
+	 * more data, it is more efficient for a USB HS mobile device
+	 * with a DMA engine to receive a full-sized NTB than to cancel
+	 * the DMA transfer and receive a short packet.
+	 *
+	 * This optimization is pointless if we end up sending a ZLP
+	 * after full-sized NTBs.
*/
- if (skb_out->len > CDC_NCM_MIN_TX_PKT)
- /* final zero padding */
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
-
- /* do we need to prevent a ZLP? */
- if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
- (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+ skb_out->len > CDC_NCM_MIN_TX_PKT)
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
+ ctx->tx_max - skb_out->len);
+ else if ((skb_out->len % dev->maxpacket) == 0)
*skb_put(skb_out, 1) = 0; /* force short packet */
/* set final frame length */
@@ -809,7 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
- ctx->netdev->stats.tx_packets += ctx->tx_curr_frame_num;
+ dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
return skb_out;
exit_no_skb:
@@ -841,24 +812,25 @@ static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *timer)
static void cdc_ncm_txpath_bh(unsigned long param)
{
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)param;
+ struct usbnet *dev = (struct usbnet *)param;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
spin_lock_bh(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
spin_unlock_bh(&ctx->mtx);
- } else if (ctx->netdev != NULL) {
+ } else if (dev->net != NULL) {
spin_unlock_bh(&ctx->mtx);
- netif_tx_lock_bh(ctx->netdev);
- usbnet_start_xmit(NULL, ctx->netdev);
- netif_tx_unlock_bh(ctx->netdev);
+ netif_tx_lock_bh(dev->net);
+ usbnet_start_xmit(NULL, dev->net);
+ netif_tx_unlock_bh(dev->net);
} else {
spin_unlock_bh(&ctx->mtx);
}
}
-static struct sk_buff *
+struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb_out;
@@ -875,7 +847,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -885,10 +857,12 @@ error:
return NULL;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_tx_fixup);
/* verify NTB header and return offset of first NDP, or negative error */
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_nth16 *nth16;
int len;
int ret = -EINVAL;
@@ -898,30 +872,33 @@ int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) +
sizeof(struct usb_cdc_ncm_ndp16))) {
- pr_debug("frame too short\n");
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
goto error;
}
nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data;
- if (le32_to_cpu(nth16->dwSignature) != USB_CDC_NCM_NTH16_SIGN) {
- pr_debug("invalid NTH16 signature <%u>\n",
- le32_to_cpu(nth16->dwSignature));
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
goto error;
}
len = le16_to_cpu(nth16->wBlockLength);
if (len > ctx->rx_max) {
- pr_debug("unsupported NTB block length %u/%u\n", len,
- ctx->rx_max);
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
goto error;
}
if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
- (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
- !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
- pr_debug("sequence number glitch prev=%d curr=%d\n",
- ctx->rx_seq, le16_to_cpu(nth16->wSequence));
+ (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth16->wSequence));
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
@@ -934,18 +911,20 @@ EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
- pr_debug("invalid NDP offset <%u>\n", ndpoffset);
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
goto error;
}
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
- pr_debug("invalid DPT16 length <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
goto error;
}
@@ -954,9 +933,9 @@ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
- if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
- skb_in->len) {
- pr_debug("Invalid nframes = %d\n", ret);
+ if ((sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
@@ -965,7 +944,7 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
@@ -989,9 +968,10 @@ next_ndp:
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
- pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp16->dwSignature));
goto err_ndp;
}
dpe16 = ndp16->dpe16;
@@ -1013,9 +993,9 @@ next_ndp:
/* sanity checking */
if (((offset + len) > skb_in->len) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
- pr_debug("invalid frame detected (ignored)"
- "offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, len, skb_in);
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, len, skb_in);
if (!x)
goto err_ndp;
break;
@@ -1040,9 +1020,10 @@ err_ndp:
error:
return 0;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_fixup);
static void
-cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+cdc_ncm_speed_change(struct usbnet *dev,
struct usb_cdc_speed_change *data)
{
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
@@ -1052,25 +1033,16 @@ cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
* Currently the USB-NET API does not support reporting the actual
* device speed. Do print it instead.
*/
- if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
- ctx->tx_speed = tx_speed;
- ctx->rx_speed = rx_speed;
-
- if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u mbit/s downlink "
- "%u mbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
- } else {
- printk(KERN_INFO KBUILD_MODNAME
- ": %s: %u kbit/s downlink "
- "%u kbit/s uplink\n",
- ctx->netdev->name,
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
- }
+ if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+ netif_info(dev, link, dev->net,
+ "%u mbit/s downlink %u mbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
+ } else {
+ netif_info(dev, link, dev->net,
+ "%u kbit/s downlink %u kbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
}
}
@@ -1086,7 +1058,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
/* test for split data in 8-byte chunks */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
- cdc_ncm_speed_change(ctx,
+ cdc_ncm_speed_change(dev,
(struct usb_cdc_speed_change *)urb->transfer_buffer);
return;
}
@@ -1101,14 +1073,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
ctx->connected = le16_to_cpu(event->wValue);
-
- printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
- " %sconnected\n",
- ctx->netdev->name, ctx->connected ? "" : "dis");
-
+ netif_info(dev, link, dev->net,
+ "network connection: %sconnected\n",
+ ctx->connected ? "" : "dis");
usbnet_link_change(dev, ctx->connected, 0);
- if (!ctx->connected)
- ctx->tx_speed = ctx->rx_speed = 0;
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1116,8 +1084,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
sizeof(struct usb_cdc_speed_change)))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
- cdc_ncm_speed_change(ctx,
- (struct usb_cdc_speed_change *) &event[1]);
+ cdc_ncm_speed_change(dev,
+ (struct usb_cdc_speed_change *)&event[1]);
break;
default:
@@ -1139,22 +1107,6 @@ static int cdc_ncm_check_connect(struct usbnet *dev)
return !ctx->connected;
}
-static int
-cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
-{
- return usbnet_probe(udev, prod);
-}
-
-static void cdc_ncm_disconnect(struct usb_interface *intf)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
-
- if (dev == NULL)
- return; /* already disconnected */
-
- usbnet_disconnect(intf);
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
@@ -1234,17 +1186,6 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
- /* Huawei NCM devices disguised as vendor specific */
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
- .driver_info = (unsigned long)&wwan_info,
- },
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
- .driver_info = (unsigned long)&wwan_info,
- },
- { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
- .driver_info = (unsigned long)&wwan_info,
- },
-
/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
USB_CLASS_COMM,
@@ -1265,8 +1206,8 @@ MODULE_DEVICE_TABLE(usb, cdc_devs);
static struct usb_driver cdc_ncm_driver = {
.name = "cdc_ncm",
.id_table = cdc_devs,
- .probe = cdc_ncm_probe,
- .disconnect = cdc_ncm_disconnect,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
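For orientation, this is the 16-bit NTB wire layout that cdc_ncm_rx_verify_nth16()/cdc_ncm_rx_verify_ndp16() validate and cdc_ncm_fill_tx_frame() builds. The struct and field names mirror the kernel's usb_cdc_ncm_nth16/ndp16/dpe16 definitions referenced in the hunks above, but the sketch is reconstructed from the NCM 1.0 specification and is meant as a reading aid, not as code from this patch.

/* Orientation sketch only -- not patch code. */
#include <stdint.h>

struct ntb16_header {			/* cf. struct usb_cdc_ncm_nth16 */
	uint32_t dwSignature;		/* "NCMH" */
	uint16_t wHeaderLength;		/* 12 */
	uint16_t wSequence;		/* checked against ctx->rx_seq */
	uint16_t wBlockLength;		/* whole NTB, must not exceed ctx->rx_max */
	uint16_t wNdpIndex;		/* offset of the first NDP16 */
};

struct ntb16_dpe {			/* cf. struct usb_cdc_ncm_dpe16 */
	uint16_t wDatagramIndex;	/* offset of one datagram, 0 terminates */
	uint16_t wDatagramLength;
};

struct ntb16_ndp {			/* cf. struct usb_cdc_ncm_ndp16 */
	uint32_t dwSignature;		/* "NCM0" (no CRC) or "NCM1" (CRC) */
	uint16_t wLength;		/* >= USB_CDC_NCM_NDP16_LENGTH_MIN (0x10) */
	uint16_t wNextNdpIndex;		/* 0, or offset of the next NDP16 */
	struct ntb16_dpe dpe16[];	/* datagram pointer entries */
};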
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
new file mode 100644
index 000000000000..312178d7b698
--- /dev/null
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -0,0 +1,230 @@
+/* huawei_cdc_ncm.c - handles Huawei devices using the CDC NCM protocol as
+ * a transport layer.
+ * Copyright (C) 2013 Enrico Mioso <mrkiko.rs@gmail.com>
+ *
+ *
+ * ABSTRACT:
+ * This driver handles devices resembling the CDC NCM standard, but
+ * encapsulating another protocol inside it. Examples are some Huawei 3G
+ * devices, exposing an embedded AT channel where you can set up the NCM
+ * connection.
+ * This code has been heavily inspired by the cdc_mbim.c driver, which is
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* Driver data */
+struct huawei_cdc_ncm_state {
+ struct cdc_ncm_ctx *ctx;
+ atomic_t pmcount;
+ struct usb_driver *subdriver;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+static int huawei_cdc_ncm_manage_power(struct usbnet *usbnet_dev, int on)
+{
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ int rv;
+
+ if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
+ (!on && atomic_dec_and_test(&drvstate->pmcount))) {
+ rv = usb_autopm_get_interface(usbnet_dev->intf);
+ usbnet_dev->intf->needs_remote_wakeup = on;
+ if (!rv)
+ usb_autopm_put_interface(usbnet_dev->intf);
+ }
+ return 0;
+}
+
+static int huawei_cdc_ncm_wdm_manage_power(struct usb_interface *intf,
+ int status)
+{
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!usbnet_dev)
+ return 0;
+
+ return huawei_cdc_ncm_manage_power(usbnet_dev, status);
+}
+
+
+static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
+ struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+ int ret = -ENODEV;
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+
+	/* altsetting should always be 1 for NCM devices - so we hard-code
+ * it here
+ */
+ ret = cdc_ncm_bind_common(usbnet_dev, intf, 1);
+ if (ret)
+ goto err;
+
+ ctx = drvstate->ctx;
+
+ if (usbnet_dev->status)
+ /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
+ * decimal (0x100)"
+ */
+ subdriver = usb_cdc_wdm_register(ctx->control,
+ &usbnet_dev->status->desc,
+ 256, /* wMaxCommand */
+ huawei_cdc_ncm_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ ret = PTR_ERR(subdriver);
+ cdc_ncm_unbind(usbnet_dev, intf);
+ goto err;
+ }
+
+ /* Prevent usbnet from using the status descriptor */
+ usbnet_dev->status = NULL;
+
+ drvstate->subdriver = subdriver;
+
+err:
+ return ret;
+}
+
+static void huawei_cdc_ncm_unbind(struct usbnet *usbnet_dev,
+ struct usb_interface *intf)
+{
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ if (drvstate->subdriver && drvstate->subdriver->disconnect)
+ drvstate->subdriver->disconnect(ctx->control);
+ drvstate->subdriver = NULL;
+
+ cdc_ncm_unbind(usbnet_dev, intf);
+}
+
+static int huawei_cdc_ncm_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ int ret = 0;
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ if (ctx == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = usbnet_suspend(intf, message);
+ if (ret < 0)
+ goto error;
+
+ if (intf == ctx->control &&
+ drvstate->subdriver &&
+ drvstate->subdriver->suspend)
+ ret = drvstate->subdriver->suspend(intf, message);
+ if (ret < 0)
+ usbnet_resume(intf);
+
+error:
+ return ret;
+}
+
+static int huawei_cdc_ncm_resume(struct usb_interface *intf)
+{
+ int ret = 0;
+ struct usbnet *usbnet_dev = usb_get_intfdata(intf);
+ struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
+ bool callsub;
+ struct cdc_ncm_ctx *ctx = drvstate->ctx;
+
+ /* should we call subdriver's resume function? */
+ callsub =
+ (intf == ctx->control &&
+ drvstate->subdriver &&
+ drvstate->subdriver->resume);
+
+ if (callsub)
+ ret = drvstate->subdriver->resume(intf);
+ if (ret < 0)
+ goto err;
+ ret = usbnet_resume(intf);
+ if (ret < 0 && callsub)
+ drvstate->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+ return ret;
+}
+
+static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
+{
+ struct cdc_ncm_ctx *ctx;
+
+ ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
+
+ if (ctx == NULL)
+ return 1; /* disconnected */
+
+ return !ctx->connected;
+}
+
+static const struct driver_info huawei_cdc_ncm_info = {
+ .description = "Huawei CDC NCM device",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = huawei_cdc_ncm_bind,
+ .unbind = huawei_cdc_ncm_unbind,
+ .check_connect = huawei_cdc_ncm_check_connect,
+ .manage_power = huawei_cdc_ncm_manage_power,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static const struct usb_device_id huawei_cdc_ncm_devs[] = {
+ /* Huawei NCM devices disguised as vendor specific */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
+
+ /* Terminating entry */
+ {
+ },
+};
+MODULE_DEVICE_TABLE(usb, huawei_cdc_ncm_devs);
+
+static struct usb_driver huawei_cdc_ncm_driver = {
+ .name = "huawei_cdc_ncm",
+ .id_table = huawei_cdc_ncm_devs,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = huawei_cdc_ncm_suspend,
+ .resume = huawei_cdc_ncm_resume,
+ .reset_resume = huawei_cdc_ncm_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(huawei_cdc_ncm_driver);
+MODULE_AUTHOR("Enrico Mioso <mrkiko.rs@gmail.com>");
+MODULE_DESCRIPTION("USB CDC NCM host driver with encapsulated protocol support");
+MODULE_LICENSE("GPL");
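Like cdc_mbim above and qmi_wwan below, the new driver keeps its per-device state overlaid on the usbnet scratch area instead of allocating it separately ("(void *)&usbnet_dev->data"), which is why qmi_wwan carries a BUILD_BUG_ON size check. A minimal sketch of that pattern follows; the type names and the scratch-array size are made up for illustration, and the kernel equivalents are struct usbnet's data[] field and BUILD_BUG_ON().

/* Illustration only -- not patch code. */
#include <assert.h>

struct fake_usbnet {
	unsigned long data[5];	/* scratch area; size arbitrary for this sketch */
};

struct fake_state {		/* stands in for huawei_cdc_ncm_state etc. */
	void *ctx;
	int pmcount;
	void *subdriver;
};

static struct fake_state *get_state(struct fake_usbnet *dev)
{
	/* compile-time guard, analogous to the BUILD_BUG_ON in qmi_wwan_bind() */
	static_assert(sizeof(struct fake_state) <=
		      sizeof(((struct fake_usbnet *)0)->data),
		      "state must fit in the scratch area");
	/* the same reinterpreting cast the drivers use */
	return (struct fake_state *)&dev->data;
}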
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 818ce90185b5..23bdd5b9274d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -143,24 +143,28 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
struct qmi_wwan_state *info = (void *)&dev->data;
- int rv = 0;
+ int rv;
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+ atomic_read(&info->pmcount), on);
- if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+ (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees
+ * the new value
+ */
rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
+ if (!rv)
+ usb_autopm_put_interface(dev->intf);
}
-err:
- return rv;
+ return 0;
}
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
@@ -199,7 +203,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
atomic_set(&info->pmcount, 0);
/* register subdriver */
- subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+ 4096, &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
@@ -228,7 +233,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_driver *driver = driver_of(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
- BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+ sizeof(struct qmi_wwan_state)));
/* set up initial state */
info->control = intf;
@@ -250,7 +256,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
- dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC header len %u\n",
+ h->bLength);
goto err;
}
break;
@@ -260,7 +267,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
- dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC union len %u\n",
+ h->bLength);
goto err;
}
cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +279,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
- dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
+ dev_dbg(&intf->dev, "CDC ether len %u\n",
+ h->bLength);
goto err;
}
cdc_ether = (struct usb_cdc_ether_desc *)buf;
break;
}
- /*
- * Remember which CDC functional descriptors we've seen. Works
+ /* Remember which CDC functional descriptors we've seen. Works
* for all types we care about, of which USB_CDC_ETHERNET_TYPE
* (0x0f) is the highest numbered
*/
@@ -293,10 +301,14 @@ next_desc:
/* Use separate control and data interfaces if we found a CDC Union */
if (cdc_union) {
- info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
- if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
- dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
- cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+ info->data = usb_ifnum_to_if(dev->udev,
+ cdc_union->bSlaveInterface0);
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+ !info->data) {
+ dev_err(&intf->dev,
+ "bogus CDC Union: master=%u, slave=%u\n",
+ cdc_union->bMasterInterface0,
+ cdc_union->bSlaveInterface0);
goto err;
}
}
@@ -374,8 +386,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
- /*
- * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+ /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
* in system sleep context, otherwise, the resume callback has
* to recover device from previous suspend failure.
*/
@@ -383,7 +394,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
if (ret < 0)
goto err;
- if (intf == info->control && info->subdriver && info->subdriver->suspend)
+ if (intf == info->control && info->subdriver &&
+ info->subdriver->suspend)
ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
@@ -396,14 +408,15 @@ static int qmi_wwan_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+ bool callsub = (intf == info->control && info->subdriver &&
+ info->subdriver->resume);
if (callsub)
ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && callsub && info->subdriver->suspend)
+ if (ret < 0 && callsub)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
@@ -714,6 +727,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
@@ -777,7 +791,8 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+ const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index eee1f19ef1e9..b24db7acbf12 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
return tot;
}
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -250,11 +255,14 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_start_xmit = veth_xmit,
.ndo_change_mtu = veth_change_mtu,
.ndo_get_stats64 = veth_get_stats64,
+ .ndo_set_rx_mode = veth_set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
@@ -273,6 +281,7 @@ static void veth_setup(struct net_device *dev)
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
+ dev->hw_enc_features = VETH_FEATURES;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bbc9cb84ec1f..01f4eb5c8b78 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -124,12 +124,14 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
+ /* Page_frag for GFP_KERNEL packet buffer allocation when we run
+ * low on memory.
+ */
+ struct page_frag alloc_frag;
+
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
- /* Per-cpu variable to show the mapping from CPU to virtqueue */
- int __percpu *vq_index;
-
/* CPU hot plug notifier */
struct notifier_block nb;
};
@@ -217,33 +219,18 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
- unsigned int offset, unsigned int *len)
-{
- int size = min((unsigned)PAGE_SIZE - offset, *len);
- int i = skb_shinfo(skb)->nr_frags;
-
- __skb_fill_page_desc(skb, i, page, offset, size);
-
- skb->data_len += size;
- skb->len += size;
- skb->truesize += PAGE_SIZE;
- skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
- *len -= size;
-}
-
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
- struct page *page, unsigned int len)
+ struct page *page, unsigned int offset,
+ unsigned int len, unsigned int truesize)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- unsigned int copy, hdr_len, offset;
+ unsigned int copy, hdr_len, hdr_padded_len;
char *p;
- p = page_address(page);
+ p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +241,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
if (vi->mergeable_rx_bufs) {
hdr_len = sizeof hdr->mhdr;
- offset = hdr_len;
+ hdr_padded_len = sizeof hdr->mhdr;
} else {
hdr_len = sizeof hdr->hdr;
- offset = sizeof(struct padded_vnet_hdr);
+ hdr_padded_len = sizeof(struct padded_vnet_hdr);
}
memcpy(hdr, p, hdr_len);
len -= hdr_len;
- p += offset;
+ offset += hdr_padded_len;
+ p += hdr_padded_len;
copy = len;
if (copy > skb_tailroom(skb))
@@ -273,6 +261,14 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
len -= copy;
offset += copy;
+ if (vi->mergeable_rx_bufs) {
+ if (len)
+ skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+ else
+ put_page(page);
+ return skb;
+ }
+
/*
* Verify that we can indeed put this data into a skb.
* This is here to handle cases when the device erroneously
@@ -284,9 +280,12 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
dev_kfree_skb(skb);
return NULL;
}
-
+ BUG_ON(offset >= PAGE_SIZE);
while (len) {
- set_skb_frag(skb, page, offset, &len);
+ unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+ frag_size, truesize);
+ len -= frag_size;
page = (struct page *)page->private;
offset = 0;
}
@@ -297,33 +296,59 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb;
}
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+ struct sk_buff *curr_skb = head_skb;
+ char *buf;
struct page *page;
- int num_buf, i, len;
+ int num_buf, len, offset;
num_buf = hdr->mhdr.num_buffers;
while (--num_buf) {
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long\n", skb->dev->name);
- skb->dev->stats.rx_length_errors++;
- return -EINVAL;
- }
- page = virtqueue_get_buf(rq->vq, &len);
- if (!page) {
+ int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
- skb->dev->name, hdr->mhdr.num_buffers);
- skb->dev->stats.rx_length_errors++;
+ head_skb->dev->name, hdr->mhdr.num_buffers);
+ head_skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
-
- set_skb_frag(skb, page, 0, &len);
-
+ if (unlikely(len > MAX_PACKET_LEN)) {
+ pr_debug("%s: rx error: merge buffer too long\n",
+ head_skb->dev->name);
+ len = MAX_PACKET_LEN;
+ }
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ head_skb->dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+ }
+ if (curr_skb != head_skb) {
+ head_skb->data_len += len;
+ head_skb->len += len;
+ head_skb->truesize += MAX_PACKET_LEN;
+ }
+ page = virt_to_head_page(buf);
+ offset = buf - (char *)page_address(page);
+ if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+ put_page(page);
+ skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+ len, MAX_PACKET_LEN);
+ } else {
+ skb_add_rx_frag(curr_skb, num_skb_frags, page,
+ offset, len,
+ MAX_PACKET_LEN);
+ }
--rq->num;
}
return 0;
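
The loop above gains a fallback for packets that span more page fragments than one skb can hold: once the current skb has MAX_SKB_FRAGS frags, a zero-length skb is allocated and chained, and later buffers land on that chained skb while the head skb keeps the aggregate length and truesize. A minimal sketch of that chaining step, with error handling trimmed and an illustrative helper name:

static struct sk_buff *next_frag_skb(struct sk_buff *head_skb,
				     struct sk_buff *curr_skb)
{
	/* Fresh zero-length skb; mergeable buffers attach as page frags. */
	struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

	if (!nskb)
		return NULL;	/* caller bumps rx_dropped and bails out */

	if (curr_skb == head_skb)
		skb_shinfo(curr_skb)->frag_list = nskb;
	else
		curr_skb->next = nskb;

	/* The head skb carries the accounting for the whole chain. */
	head_skb->truesize += nskb->truesize;
	return nskb;
}
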
@@ -341,8 +366,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(rq, buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
return;
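
With mergeable buffers no longer backed by the driver's page pool, the error path has to pick the matching release primitive for each receive mode. The rule, gathered into one illustrative helper (name hypothetical):

static void drop_rx_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf)
{
	if (vi->big_packets)
		give_pages(rq, buf);			/* page chain back to the pool */
	else if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));	/* drop one frag reference */
	else
		dev_kfree_skb(buf);			/* small mode posts whole skbs */
}
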
@@ -352,19 +379,28 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
skb = buf;
len -= sizeof(struct virtio_net_hdr);
skb_trim(skb, len);
+ } else if (vi->mergeable_rx_bufs) {
+ struct page *page = virt_to_head_page(buf);
+ skb = page_to_skb(rq, page,
+ (char *)buf - (char *)page_address(page),
+ len, MAX_PACKET_LEN);
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ put_page(page);
+ return;
+ }
+ if (receive_mergeable(rq, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
} else {
page = buf;
- skb = page_to_skb(rq, page, len);
+ skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
give_pages(rq, page);
return;
}
- if (vi->mergeable_rx_bufs)
- if (receive_mergeable(rq, skb)) {
- dev_kfree_skb(skb);
- return;
- }
}
hdr = skb_vnet_hdr(skb);
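
For the mergeable case the virtqueue now hands back a raw buffer pointer rather than a page, so the receive path in the hunk above has to recover the backing page and the offset within it before calling page_to_skb(). A sketch of that derivation, assuming buf came from virtqueue_get_buf() (the wrapper name is illustrative):

static struct sk_buff *mergeable_buf_to_skb(struct receive_queue *rq,
					    void *buf, unsigned int len)
{
	struct page *page = virt_to_head_page(buf);
	unsigned int offset = (char *)buf - (char *)page_address(page);

	/* truesize is charged per buffer, not per page, hence MAX_PACKET_LEN */
	return page_to_skb(rq, page, offset, len, MAX_PACKET_LEN);
}
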
@@ -501,18 +537,28 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct page *page;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ char *buf = NULL;
int err;
- page = get_a_page(rq, gfp);
- if (!page)
+ if (gfp & __GFP_WAIT) {
+ if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+ gfp)) {
+ buf = (char *)page_address(vi->alloc_frag.page) +
+ vi->alloc_frag.offset;
+ get_page(vi->alloc_frag.page);
+ vi->alloc_frag.offset += MAX_PACKET_LEN;
+ }
+ } else {
+ buf = netdev_alloc_frag(MAX_PACKET_LEN);
+ }
+ if (!buf)
return -ENOMEM;
- sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+ sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
- give_pages(rq, page);
+ put_page(virt_to_head_page(buf));
return err;
}
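
When the refill runs in process context the driver now carves MAX_PACKET_LEN chunks out of a persistent page_frag instead of posting whole pages; the atomic path falls back to netdev_alloc_frag(). A minimal sketch of the sleeping-path carving, with the helper name illustrative:

static void *alloc_mergeable_chunk(struct page_frag *frag, gfp_t gfp)
{
	char *buf;

	if (!skb_page_frag_refill(MAX_PACKET_LEN, frag, gfp))
		return NULL;

	buf = (char *)page_address(frag->page) + frag->offset;
	get_page(frag->page);		/* chunk must outlive later refills */
	frag->offset += MAX_PACKET_LEN;

	return buf;
}
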
@@ -1065,7 +1111,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
- int cpu;
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1075,16 +1120,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
vi->affinity_hint_set = false;
}
-
- i = 0;
- for_each_online_cpu(cpu) {
- if (cpu == hcpu) {
- *per_cpu_ptr(vi->vq_index, cpu) = -1;
- } else {
- *per_cpu_ptr(vi->vq_index, cpu) =
- ++i % vi->curr_queue_pairs;
- }
- }
}
static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1106,7 +1141,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
- *per_cpu_ptr(vi->vq_index, cpu) = i;
+ netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
i++;
}
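
Instead of maintaining a private per-CPU vq_index table, the affinity code now publishes the CPU-to-queue mapping through XPS, which is what allows the custom ndo_select_queue below to be deleted: the core's default transmit queue selection consults the XPS map. The resulting loop, shown as one illustrative helper:

static void virtnet_map_queues(struct net_device *dev, struct virtnet_info *vi)
{
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(dev, cpumask_of(cpu), i);
		i++;
	}
}
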
@@ -1220,28 +1255,6 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- int txq;
- struct virtnet_info *vi = netdev_priv(dev);
-
- if (skb_rx_queue_recorded(skb)) {
- txq = skb_get_rx_queue(skb);
- } else {
- txq = *__this_cpu_ptr(vi->vq_index);
- if (txq == -1)
- txq = 0;
- }
-
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
-
- return txq;
-}
-
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -1253,7 +1266,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
- .ndo_select_queue = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
@@ -1336,8 +1348,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs || vi->big_packets)
+ if (vi->big_packets)
give_pages(&vi->rq[i], buf);
+ else if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
else
dev_kfree_skb(buf);
--vi->rq[i].num;
@@ -1562,10 +1576,6 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->stats == NULL)
goto free;
- vi->vq_index = alloc_percpu(int);
- if (vi->vq_index == NULL)
- goto free_stats;
-
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1592,7 +1602,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_index;
+ goto free_stats;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
@@ -1643,8 +1653,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
-free_index:
- free_percpu(vi->vq_index);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
free_stats:
free_percpu(vi->stats);
free:
@@ -1678,10 +1688,11 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
+ if (vi->alloc_frag.page)
+ put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
- free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a03f358fd58b..12040a35d95d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -410,9 +410,9 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
extern char vmxnet3_driver_name[];
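
The extern removals in this and the following headers (x25_asy.h, z85230.h, i2400m*.h) are purely cosmetic: function declarations have external linkage by default, so the keyword adds nothing. Object declarations, like the vmxnet3_driver_name[] array kept above, still need it to remain declarations rather than tentative definitions. For illustration:

extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);	/* old style */
void vmxnet3_set_ethtool_ops(struct net_device *netdev);		/* new style, same meaning */
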
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2ef5b6219f3f..78df8f39e57c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -60,10 +60,6 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@@ -2087,7 +2083,7 @@ static void vxlan_setup(struct net_device *dev)
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(dev_net(dev), &low, &high);
vxlan->port_min = low;
vxlan->port_max = high;
vxlan->dst_port = htons(vxlan_port);
@@ -2180,7 +2176,7 @@ static void vxlan_del_work(struct work_struct *work)
* could be used for both IPv4 and IPv6 communications, but
* users may set bindv6only=1.
*/
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2193,7 +2189,7 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDPv6 socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2208,28 +2204,27 @@ static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
&vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
/* At this point, IPv6 module should have been loaded in
* sock_create_kern().
*/
BUG_ON(!ipv6_stub);
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
#else
-static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v6_sock(struct net *net, __be16 port)
{
- return -EPFNOSUPPORT;
+ return ERR_PTR(-EPFNOSUPPORT);
}
#endif
-static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
+static struct socket *create_v4_sock(struct net *net, __be16 port)
{
struct sock *sk;
struct socket *sock;
@@ -2244,7 +2239,7 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDP socket create failed\n");
- return rc;
+ return ERR_PTR(rc);
}
/* Put in proper namespace */
@@ -2257,13 +2252,12 @@ static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
pr_debug("bind for UDP socket %pI4:%u (%d)\n",
&vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
sk_release_kernel(sk);
- return rc;
+ return ERR_PTR(rc);
}
- *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
- return 0;
+ return sock;
}
/* Create new listen socket if needed */
@@ -2274,7 +2268,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct vxlan_sock *vs;
struct socket *sock;
struct sock *sk;
- int rc = 0;
unsigned int h;
vs = kmalloc(sizeof(*vs), GFP_KERNEL);
@@ -2287,12 +2280,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
INIT_WORK(&vs->del_work, vxlan_del_work);
if (ipv6)
- rc = create_v6_sock(net, port, &sock);
+ sock = create_v6_sock(net, port);
else
- rc = create_v4_sock(net, port, &sock);
- if (rc < 0) {
+ sock = create_v4_sock(net, port);
+ if (IS_ERR(sock)) {
kfree(vs);
- return ERR_PTR(rc);
+ return ERR_CAST(sock);
}
vs->sock = sock;
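
Returning the socket (or an encoded errno) removes the separate rc/output-parameter plumbing: the constructors hand back either a valid pointer or ERR_PTR(-errno), and callers branch on IS_ERR() and forward the error with ERR_CAST(), as the hunk above shows. A minimal caller-side sketch (IS_ERR/PTR_ERR/ERR_CAST come from <linux/err.h>; the wrapper name is illustrative):

static struct socket *vxlan_get_sock(struct net *net, __be16 port, bool ipv6)
{
	struct socket *sock;

	sock = ipv6 ? create_v6_sock(net, port) : create_v4_sock(net, port);
	if (IS_ERR(sock))
		pr_debug("vxlan: socket create failed: %ld\n", PTR_ERR(sock));

	return sock;	/* valid pointer or ERR_PTR(-errno), never NULL */
}
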
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 3d80e4267de8..3d741663fd67 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"Hostess SV11", sv) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_irq;
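
IRQF_DISABLED has been a no-op for a long time: genirq runs hardirq handlers with local interrupts disabled regardless, so passing 0 here (and in the sealevel driver below) changes nothing in behaviour. The call after the change, for reference:

	if (request_irq(irq, z8530_interrupt, 0, "Hostess SV11", sv) < 0) {
		pr_warn("IRQ %d already in use\n", irq);
		goto err_irq;
	}
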
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4f7748478984..27860b4f5908 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
- if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, z8530_interrupt, 0,
"SeaLevel", dev) < 0) {
pr_warn("IRQ %d already in use\n", irq);
goto err_request_irq;
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index 8f0fc2e57e2b..f57ee67836ae 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -41,6 +41,6 @@ struct x25_asy {
#define X25_ASY_MAGIC 0x5303
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
#endif /* _LINUX_X25_ASY.H */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index f29d554fc07d..2416a9d60bd6 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -395,20 +395,19 @@ struct z8530_dev
extern u8 z8530_dead_port[];
extern u8 z8530_hdlc_kilostream_85230[];
extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
- struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
/*
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 9f1e947f3557..649ecad6844c 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
i2400mu->rx_size_auto_shrink = 1;
}
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
- const struct i2400m_bootrom_header *,
- size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
- struct i2400m_bootrom_header *,
- size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *, size_t,
+ int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 79c6505b5c20..5a34e72bab9a 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -710,18 +710,18 @@ enum i2400m_bri {
I2400M_BRI_MAC_REINIT = 1 << 3,
};
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
static inline
int i2400m_is_d2h_barker(const void *buf)
{
const __le32 *barker = buf;
return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
}
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
/* Make/grok boot-rom header commands */
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
/*
* Driver / device setup and internal functions
*/
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
- const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
- enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+ int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
#ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
#else
static inline int i2400m_debugfs_add(struct i2400m *i2400m)
{
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
#endif
/* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
extern struct attribute_group i2400m_dev_attr_group;
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
dev_put(i2400m->wimax_dev.net_dev);
}
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
* the bus-specific drivers.
*/
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
/*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
return i2400m->wimax_dev.net_dev->dev.parent;
}
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
- char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
- const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
static inline
struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
return &iface->cur_altsetting->endpoint[ep].desc;
}
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
- enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
- struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+ const struct i2400m_tlv_rf_switches_status *);
/*
* Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
/* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9a24e599dee..cfce83e1f273 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7fe19648f10e..edf4b57c4aaa 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1abf1d421173..c63d1159db5c 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,6 +25,23 @@ config ATH_DEBUG
Say Y, if you want to debug atheros wireless drivers.
Right now only ath9k makes use of this.
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+ bool "Atheros dynamic user regulatory hints"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled in countries where
+ this feature is explicitly allowed and only on cards that
+ specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+ bool "Atheros dynamic user regulatory testing"
+ depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems
+ undergoing certification testing.
+
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
@@ -32,5 +49,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index fb05cfd19361..7d023b0f13b4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -5,13 +5,16 @@ obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH_COMMON) += ath.o
ath-objs := main.o \
regd.o \
hw.o \
- key.o
+ key.o \
+ dfs_pattern_detector.o \
+ dfs_pri_detector.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 17d7fece35d2..280fc3d53a36 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d1c405..a1f099628850 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
ar->bmi.done_sent = false;
}
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
if (ar->bmi.done_sent) {
- ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
return 0;
}
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
- __func__, ar, address, length);
-
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, *param);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
- __func__, ar, address, *param);
-
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
+ ath10k_dbg(ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f518f8..e46951b8fb92 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *indicator_addr;
-
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
- return;
- }
-
- /* workaround for QCA988x_1.0 HW CE */
- indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
- if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
- iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
- } else {
- unsigned long irq_flags;
- local_irq_save(irq_flags);
- iowrite32(1, indicator_addr);
-
- /*
- * PCIE write waits for ACK in IPQ8K, there is no
- * need to read back value.
- */
- (void)ioread32(indicator_addr);
- (void)ioread32(indicator_addr); /* conservative */
-
- ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
- iowrite32(0, indicator_addr);
- local_irq_restore(irq_flags);
- }
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
- ret = -EIO;
+ ret = -ENOSR;
goto exit;
}
@@ -346,7 +319,7 @@ exit:
return ret;
}
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@@ -365,77 +338,26 @@ int ath10k_ce_send(struct ce_state *ce_state,
return ret;
}
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
- unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
- unsigned int num_items = sendlist->num_items;
- struct ce_sendlist_item *item;
-
- item = &sendlist->item[num_items];
- item->data = buffer;
- item->u.nbytes = nbytes;
- item->flags = flags;
- sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_context,
- struct ce_sendlist *sendlist,
- unsigned int transfer_id)
-{
- struct ce_ring_state *src_ring = ce_state->src_ring;
- struct ce_sendlist_item *item;
- struct ath10k *ar = ce_state->ar;
+ struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int num_items = sendlist->num_items;
- unsigned int sw_index;
- unsigned int write_index;
- int i, delta, ret = -ENOMEM;
+ int delta;
spin_lock_bh(&ar_pci->ce_lock);
-
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
-
- delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
- if (delta >= num_items) {
- /*
- * Handle all but the last item uniformly.
- */
- for (i = 0; i < num_items - 1; i++) {
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state,
- CE_SENDLIST_ITEM_CTXT,
- (u32) item->data,
- item->u.nbytes, transfer_id,
- item->flags |
- CE_SEND_FLAG_GATHER);
- if (ret)
- ath10k_warn("CE send failed for item: %d\n", i);
- }
- /*
- * Provide valid context pointer for final item.
- */
- item = &sendlist->item[i];
- ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
- (u32) item->data, item->u.nbytes,
- transfer_id, item->flags);
- if (ret)
- ath10k_warn("CE send failed for last item: %d\n", i);
- }
-
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
spin_unlock_bh(&ar_pci->ce_lock);
- return ret;
+ return delta;
}
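
With the sendlist machinery gone, multi-buffer (gather) sends are expected to be driven by the caller: query the free source-ring space, then push each buffer with ath10k_ce_send(), marking all but the last with CE_SEND_FLAG_GATHER. A hypothetical caller-side sketch; the helper, its parameters, and the two-buffer shape are not part of the driver, and it assumes the caller serialises access to the pipe:

static int ce_send_gather_pair(struct ath10k_ce_pipe *pipe, void *ctx,
			       u32 hdr_paddr, unsigned int hdr_len,
			       u32 data_paddr, unsigned int data_len,
			       unsigned int transfer_id)
{
	int ret;

	if (ath10k_ce_num_free_src_entries(pipe) < 2)
		return -ENOSR;

	ret = ath10k_ce_send(pipe, NULL, hdr_paddr, hdr_len, transfer_id,
			     CE_SEND_FLAG_GATHER);
	if (ret)
		return ret;

	return ath10k_ce_send(pipe, ctx, data_paddr, data_len, transfer_id, 0);
}
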
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
ret = -EIO;
}
ath10k_pci_sleep(ar);
+
+out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
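
Throughout ce.c the pattern is now: check the return value of ath10k_pci_wake() before touching registers, do the access, then ath10k_pci_sleep(), propagating a wake failure instead of silently poking hardware that may still be asleep. A condensed sketch of that pattern (the helper itself is illustrative):

static int ce_update_write_index(struct ath10k *ar, u32 ctrl_addr,
				 unsigned int write_index)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;	/* never touch registers if the wake failed */

	ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
	ath10k_pci_sleep(ar);

	return 0;
}
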
@@ -479,14 +405,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
- struct ce_ring_state *dest_ring = ce_state->dest_ring;
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
@@ -535,7 +461,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
return 0;
}
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -556,11 +482,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -612,19 +538,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring = ce_state->src_ring;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret = -EIO;
+ int ret;
if (src_ring->hw_index == sw_index) {
/*
@@ -634,48 +561,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
- ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return ret;
+
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
+
ath10k_pci_sleep(ar);
}
+
read_index = src_ring->hw_index;
- if ((read_index != sw_index) && (read_index != 0xffffffff)) {
- struct ce_desc *sbase = src_ring->shadow_base;
- struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+ if ((read_index == sw_index) || (read_index == 0xffffffff))
+ return -EIO;
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(sdesc->addr);
- *nbytesp = __le16_to_cpu(sdesc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
- CE_DESC_FLAGS_META_DATA);
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
- if (per_transfer_contextp)
- *per_transfer_contextp =
- src_ring->per_transfer_context[sw_index];
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
- /* sanity */
- src_ring->per_transfer_context[sw_index] = NULL;
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
- /* Update sw_index */
- sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
- src_ring->sw_index = sw_index;
- ret = 0;
- }
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
- return ret;
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@@ -727,7 +660,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
return ret;
}
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -756,53 +689,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
- void *transfer_context;
- u32 buf;
- unsigned int nbytes;
- unsigned int id;
- unsigned int flags;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
- if (ce_state->recv_cb) {
- /*
- * Pop completed recv buffers and call the registered
- * recv callback for each
- */
- while (ath10k_ce_completed_recv_next_nolock(ce_state,
- &transfer_context,
- &buf, &nbytes,
- &id, &flags) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->recv_cb(ce_state, transfer_context, buf,
- nbytes, id, flags);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ spin_unlock_bh(&ar_pci->ce_lock);
- if (ce_state->send_cb) {
- /*
- * Pop completed send buffers and call the registered
- * send callback for each
- */
- while (ath10k_ce_completed_send_next_nolock(ce_state,
- &transfer_context,
- &buf,
- &nbytes,
- &id) == 0) {
- spin_unlock_bh(&ar_pci->ce_lock);
- ce_state->send_cb(ce_state, transfer_context,
- buf, nbytes, id);
- spin_lock_bh(&ar_pci->ce_lock);
- }
- }
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
u32 intr_summary;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
+ int ret;
- ath10k_pci_wake(ar);
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id;
+ int ce_id, ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
- ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
spin_unlock_bh(&ar_pci->ce_lock);
}
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *src_ring;
+ struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->src_ring = (struct ce_ring_state *)ptr;
+ ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -970,6 +877,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
+
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned) {
+ pci_free_consistent(ar_pci->pdev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ ce_state->src_ring = NULL;
+ return -ENOMEM;
+ }
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_ring_state *dest_ring;
+ struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
- ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+ ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
- ce_state->dest_ring = (struct ce_ring_state *)ptr;
+ ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
- ptr += sizeof(struct ce_ring_state);
+ ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
@@ -1055,6 +978,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_state->dest_ring);
+ ce_state->dest_ring = NULL;
+ return -ENOMEM;
+ }
+
dest_ring->base_addr_ce_space_unaligned = base_addr;
/*
@@ -1071,44 +1000,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_pci_wake(ar);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = NULL;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
- if (!ar_pci->ce_id_to_state[ce_id]) {
- ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
- if (ce_state == NULL) {
- spin_unlock_bh(&ar_pci->ce_lock);
- return NULL;
- }
-
- ar_pci->ce_id_to_state[ce_id] = ce_state;
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->state = CE_RUNNING;
- /* Save attribute flags */
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- }
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ctrl_addr;
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1122,12 +1042,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return NULL;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
@@ -1136,40 +1061,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
}
if (attr->src_nentries) {
- if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE src ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
if (attr->dest_nentries) {
- if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
- ce_id);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ if (ret) {
+ ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
/* Enable CE error interrupts */
- ath10k_pci_wake(ar);
ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
ath10k_pci_sleep(ar);
return ce_state;
}
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
- unsigned int ce_id = ce_state->id;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ce_state->state = CE_UNUSED;
- ar_pci->ce_id_to_state[ce_id] = NULL;
-
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
- kfree(ce_state);
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c026f4..15d45b5b7615 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
-#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@@ -36,16 +35,9 @@
* how to use copy engines.
*/
-struct ce_state;
+struct ath10k_ce_pipe;
-/* Copy Engine operational state */
-enum ce_op_state {
- CE_UNUSED,
- CE_PAUSED,
- CE_RUNNING,
-};
-
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
- enum ce_op_state state;
-
- void (*send_cb) (struct ce_state *ce_state,
- void *per_transfer_send_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id);
- void (*recv_cb) (struct ce_state *ce_state,
- void *per_transfer_recv_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags);
-
- unsigned int src_sz_max;
- struct ce_ring_state *src_ring;
- struct ce_ring_state *dest_ring;
-};
-struct ce_sendlist_item {
- /* e.g. buffer or desc list */
- dma_addr_t data;
- union {
- /* simple buffer */
- unsigned int nbytes;
- /* Rx descriptor list */
- unsigned int ndesc;
- } u;
- /* externally-specified flags; OR-ed with internal flags */
- u32 flags;
-};
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
-struct ce_sendlist {
- unsigned int num_items;
- struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
};
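
The callback signatures shrink to just the pipe: instead of the CE layer popping each completion and invoking the callback per buffer, the handler is called once per interrupt and drains completions itself with ath10k_ce_completed_send_next()/_recv_next(). A hypothetical send-completion handler under the new signature, which would be registered with ath10k_ce_send_cb_register():

static void my_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	void *ctx;
	u32 addr;
	unsigned int nbytes, id;

	/* Drain everything the hardware has completed since the last run. */
	while (ath10k_ce_completed_send_next(ce_state, &ctx, &addr,
					     &nbytes, &id) == 0) {
		/* hand 'ctx' back to the upper layer here */
	}
}
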
/* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
@@ -190,36 +152,11 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
- u32 buffer,
- unsigned int nbytes,
- /* OR-ed with internal flags */
- u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- * ce - which copy engine to use
- * sendlist - list of simple buffers to send using gather
- * transfer_id - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
- void *per_transfer_send_context,
- struct ce_sendlist *sendlist,
- /* 14 bits */
- unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
@@ -233,17 +170,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
 * Implementation note: Pushes a buffer to Dest ring.
*/
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@@ -253,7 +185,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -263,7 +195,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@@ -272,7 +204,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
@@ -282,7 +214,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
@@ -291,13 +223,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
- /* currently not in use */
- unsigned int priority;
-
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
@@ -336,21 +265,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
-
- /* Future use */
- void *reserved;
};
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
-
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7226c23b9569..1129994fb105 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
- .id = QCA988X_HW_1_0_VERSION,
- .name = "qca988x hw1.0",
- .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
- .fw = {
- .dir = QCA988X_HW_1_0_FW_DIR,
- .fw = QCA988X_HW_1_0_FW_FILE,
- .otp = QCA988X_HW_1_0_OTP_FILE,
- .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
- },
- },
- {
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
@@ -64,33 +53,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
}
-static int ath10k_check_fw_version(struct ath10k *ar)
-{
- char version[32];
-
- if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
- ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
- ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
- ar->fw_version_build >= SUPPORTED_FW_BUILD)
- return 0;
-
- snprintf(version, sizeof(version), "%u.%u.%u.%u",
- SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
- SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
-
- ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
- ar->hw->wiphy->fw_version);
- ath10k_warn("Please upgrade to version %s (or newer)\n", version);
-
- return 0;
-}
-
static int ath10k_init_connect_htc(struct ath10k *ar)
{
int status;
@@ -112,7 +80,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
- ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@@ -200,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
-static int ath10k_push_board_ext_data(struct ath10k *ar,
- const struct firmware *fw)
+static int ath10k_push_board_ext_data(struct ath10k *ar)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -214,21 +181,21 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
- ath10k_dbg(ATH10K_DBG_CORE,
- "ath10k: Board extended Data download addr: 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
return 0;
- if (fw->size != (board_data_size + board_ext_data_size)) {
+ if (ar->board_len != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
- fw->size, board_data_size, board_ext_data_size);
+ ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
- fw->data + board_data_size,
+ ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
@@ -247,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
- const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
- ret = ath10k_push_board_ext_data(ar, fw);
+ ret = ath10k_push_board_ext_data(ar);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
goto exit;
@@ -264,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
- ret = ath10k_bmi_write_memory(ar, address, fw->data,
- min_t(u32, board_data_size, fw->size));
+ ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+ min_t(u32, board_data_size,
+ ar->board_len));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
goto exit;
@@ -283,17 +250,16 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- const struct firmware *fw = ar->otp;
u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
- if (!ar->otp)
+ if (!ar->otp_data || !ar->otp_len)
return 0;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
@@ -312,13 +278,13 @@ exit:
static int ath10k_download_fw(struct ath10k *ar)
{
- const struct firmware *fw = ar->firmware;
u32 address;
int ret;
address = ar->hw_params.patch_load_addr;
- ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
+ ar->firmware_len);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
goto exit;
@@ -330,8 +296,8 @@ exit:
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (ar->board_data && !IS_ERR(ar->board_data))
- release_firmware(ar->board_data);
+ if (ar->board && !IS_ERR(ar->board))
+ release_firmware(ar->board);
if (ar->otp && !IS_ERR(ar->otp))
release_firmware(ar->otp);
@@ -339,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
+ ar->board = NULL;
ar->board_data = NULL;
+ ar->board_len = 0;
+
ar->otp = NULL;
+ ar->otp_data = NULL;
+ ar->otp_len = 0;
+
ar->firmware = NULL;
+ ar->firmware_data = NULL;
+ ar->firmware_len = 0;
}
-static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
@@ -358,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return -EINVAL;
}
- ar->board_data = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board_data)) {
- ret = PTR_ERR(ar->board_data);
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
goto err;
}
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
@@ -376,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->firmware_data = ar->firmware->data;
+ ar->firmware_len = ar->firmware->size;
+
/* OTP may be undefined. If so, don't fetch it at all */
if (ar->hw_params.fw.otp == NULL)
return 0;
@@ -389,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
+ ar->otp_data = ar->otp->data;
+ ar->otp_len = ar->otp->size;
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+ if (IS_ERR(ar->firmware)) {
+ ath10k_err("Could not fetch firmware file '%s': %ld\n",
+ name, PTR_ERR(ar->firmware));
+ return PTR_ERR(ar->firmware);
+ }
+
+ data = ar->firmware->data;
+ len = ar->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err("firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err("Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ break;
+
+ memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+ ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ ar->hw->wiphy->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ar->fw_features);
+ }
+
+ ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+ ar->fw_features,
+ sizeof(ar->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ar->firmware_data = data;
+ ar->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->otp_data = data;
+ ar->otp_len = ie_len;
+
+ break;
+ default:
+ ath10k_warn("Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!ar->firmware_data || !ar->firmware_len) {
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
+ name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ /* now fetch the board file */
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err("board data file not defined");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
+ ath10k_err("could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
return 0;
err:
@@ -396,6 +542,28 @@ err:
return ret;
}
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+ if (ret == 0) {
+ ar->fw_api = 2;
+ goto out;
+ }
+
+ ret = ath10k_core_fetch_firmware_api_1(ar);
+ if (ret)
+ return ret;
+
+ ar->fw_api = 1;
+
+out:
+ ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
static int ath10k_init_download_firmware(struct ath10k *ar)
{
int ret;
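
The two hunks above add the firmware-2.bin container format: an ATH10K_FIRMWARE_MAGIC string (NUL-terminated, padded to a 4-byte boundary) followed by a sequence of 4-byte-aligned IEs, each carrying a 32-bit little-endian id and length; ath10k_core_fetch_firmware_files() then tries the API 2 container first and falls back to the old per-file API 1 layout. A minimal sketch of walking such a TLV blob, assuming an IE header shaped like the driver's struct ath10k_fw_ie (two __le32 fields plus payload); illustration only, not the driver's code:

    struct fw_ie {                          /* mirrors the assumed ath10k_fw_ie layout */
            __le32 id;
            __le32 len;
            u8 data[];
    };

    static int fw_ie_walk(const u8 *data, size_t len)
    {
            const struct fw_ie *hdr;
            size_t ie_len;

            while (len > sizeof(*hdr)) {
                    hdr = (const struct fw_ie *)data;
                    ie_len = le32_to_cpu(hdr->len);

                    data += sizeof(*hdr);
                    len -= sizeof(*hdr);
                    if (ie_len > len)
                            return -EINVAL;         /* truncated IE */

                    /* dispatch on le32_to_cpu(hdr->id) here */

                    ie_len = ALIGN(ie_len, 4);      /* IEs are padded to 4 bytes */
                    if (ie_len > len)
                            break;
                    data += ie_len;
                    len -= ie_len;
            }
            return 0;
    }
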
@@ -446,6 +614,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn("could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
ath10k_info("UART prints enabled\n");
return 0;
}
@@ -545,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
init_waitqueue_head(&ar->event_queue);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -559,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
+ ath10k_debug_destroy(ar);
+
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
@@ -570,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
{
int status;
+ lockdep_assert_held(&ar->conf_mutex);
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -620,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
- status = ath10k_check_fw_version(ar);
- if (status)
- goto err_disconnect_htc;
-
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
@@ -641,7 +819,12 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_disconnect_htc;
+
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ INIT_LIST_HEAD(&ar->arvifs);
return 0;
@@ -658,6 +841,9 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@@ -704,23 +890,65 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ mutex_lock(&ar->conf_mutex);
+
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
+ mutex_unlock(&ar->conf_mutex);
return ret;
}
ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
ath10k_hif_power_down(ar);
return 0;
}
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+ u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+ ar->chip_id, hw_revision);
+
+ /* Check that we are not using hw1.0 (some boards have the same PCI id
+ * as hw2.0) before doing anything else, as ath10k crashes horribly
+ * due to missing hw1.0 workarounds. */
+ switch (hw_revision) {
+ case QCA988X_HW_1_0_CHIP_ID_REV:
+ ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+ return -EOPNOTSUPP;
+
+ case QCA988X_HW_2_0_CHIP_ID_REV:
+ /* known hardware revision, continue normally */
+ return 0;
+
+ default:
+ ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+ ar->chip_id);
+ return 0;
+ }
+
+ return 0;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +983,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
+
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba563ed42..0934f7633de3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,27 +43,23 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
+ u8 vdev_id;
struct {
- u8 vdev_id;
- u16 msdu_id;
u8 tid;
bool is_offchan;
- bool is_conf;
- bool discard;
- bool no_ack;
- u8 refcount;
- struct sk_buff *txfrag;
- struct sk_buff *msdu;
- } __packed htt;
- /* 4 bytes left on 64bit arch */
+ u8 frag_len;
+ u8 pad_len;
+ } __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@ struct ath10k_bmi {
bool done_sent;
};
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
- atomic_t pending_tx_count;
- wait_queue_head_t wq;
+ wait_queue_head_t tx_credits_wq;
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
- struct sk_buff_head wmi_event_list;
- struct work_struct wmi_event_work;
+ u32 num_mem_chunks;
+ struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
@@ -198,17 +205,22 @@ struct ath10k_peer {
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
+ struct list_head list;
+
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
+ struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
+ struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
- u8 def_wep_key_index;
+ u8 def_wep_key_idx;
+ u8 def_wep_key_newidx;
u16 tx_seq_no;
@@ -246,6 +258,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@@ -270,12 +285,27 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* firmware from 10X branch */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+ /* firmware supports management frame tx over WMI, otherwise it goes over HTT */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
+ u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -288,6 +318,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
struct targetdef *targetdef;
struct hostdef *hostdef;
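
fw_features is an ordinary kernel bitmap sized by ATH10K_FW_FEATURE_COUNT; the FW IE parser sets bits with __set_bit() and the rest of the driver is expected to query them with test_bit(). A small usage sketch with a hypothetical helper name:

    /* Sketch: choose the mgmt tx path from the firmware feature bitmap. */
    static bool uses_wmi_mgmt_tx(struct ath10k *ar)
    {
            return test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features);
    }
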
@@ -319,9 +351,19 @@ struct ath10k {
} fw;
} hw_params;
- const struct firmware *board_data;
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
const struct firmware *otp;
+ const void *otp_data;
+ size_t otp_len;
+
const struct firmware *firmware;
+ const void *firmware_data;
+ size_t firmware_len;
+
+ int fw_api;
struct {
struct completion started;
@@ -364,6 +406,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
@@ -372,6 +415,9 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
enum ath10k_state state;
struct work_struct restart_work;
@@ -393,7 +439,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594fa098..760ff2289e3c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -499,6 +501,144 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn("failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+ return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Must not use the _sync variant here to avoid a deadlock; the sync
+ * cancel is done in ath10k_debug_destroy(). The htt_stats_mask check
+ * avoids a warning from del_timer(). */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
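
The htt_stats_mask handlers above implement a self-rearming poll: a non-zero mask triggers an HTT stats request, and the delayed work re-queues itself every ATH10K_DEBUG_HTT_STATS_INTERVAL ms while the device is on. Stripped of the driver specifics, the pattern is roughly the following (types and names here are illustrative, not the driver's):

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct poll_ctx {
            struct mutex lock;
            struct workqueue_struct *wq;
            struct delayed_work dwork;
            bool enabled;
    };

    static void poll_dwork(struct work_struct *work)
    {
            struct poll_ctx *ctx = container_of(work, struct poll_ctx, dwork.work);

            mutex_lock(&ctx->lock);
            if (ctx->enabled) {
                    /* issue the periodic request here, then re-arm */
                    queue_delayed_work(ctx->wq, &ctx->dwork,
                                       msecs_to_jiffies(1000));
            }
            mutex_unlock(&ctx->lock);
    }
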
@@ -507,6 +647,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
return 0;
}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c54028..3cfe3ee90dbe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,22 +27,26 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
- ATH10K_DBG_CORE = 0x00000020,
+ ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
extern unsigned int ath10k_debug_mask;
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
@@ -50,11 +54,24 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
@@ -68,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329ef52f3..3118d7506734 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
- memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- struct sk_buff *skb,
- u8 credits)
-{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- int ret;
-
- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
- ep->eid, skb);
-
- ath10k_htc_prepare_tx_skb(ep, skb);
-
- ret = ath10k_skb_map(htc->ar->dev, skb);
- if (ret)
- goto err;
-
- ret = ath10k_hif_send_head(htc->ar,
- ep->ul_pipe_id,
- ep->eid,
- skb->len,
- skb);
- if (unlikely(ret))
- goto err;
-
- return 0;
-err:
- ath10k_warn("HTC issue failed: %d\n", ret);
-
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- spin_unlock_bh(&htc->tx_lock);
-
- /* this is the simplest way to handle out-of-resources for non-credit
- * based endpoints. credit based endpoints can still get -ENOSR, but
- * this is highly unlikely as credit reservation should prevent that */
- if (ret == -ENOSR) {
- spin_lock_bh(&htc->tx_lock);
- __skb_queue_head(&ep->tx_queue, skb);
- spin_unlock_bh(&htc->tx_lock);
-
- return ret;
- }
-
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
-
- return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep,
- u8 *credits)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
- int credits_required;
- int remainder;
- unsigned int transfer_len;
-
- lockdep_assert_held(&htc->tx_lock);
-
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- return NULL;
-
- skb_cb = ATH10K_SKB_CB(skb);
- transfer_len = skb->len;
-
- if (likely(transfer_len <= htc->target_credit_size)) {
- credits_required = 1;
- } else {
- /* figure out how many credits this message requires */
- credits_required = transfer_len / htc->target_credit_size;
- remainder = transfer_len % htc->target_credit_size;
-
- if (remainder)
- credits_required++;
- }
-
- ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
- credits_required, ep->tx_credits);
-
- if (ep->tx_credits < credits_required) {
- __skb_queue_head(&ep->tx_queue, skb);
- return NULL;
- }
-
- ep->tx_credits -= credits_required;
- *credits = credits_required;
- return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
- struct ath10k_htc_ep *ep = container_of(work,
- struct ath10k_htc_ep, send_work);
- struct ath10k_htc *htc = ep->htc;
- struct sk_buff *skb;
- u8 credits = 0;
- int ret;
-
- while (true) {
- if (ep->ul_is_polled)
- ath10k_htc_send_complete_check(ep, 0);
-
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credit_flow_enabled)
- skb = ath10k_htc_get_skb_credit_based(htc, ep,
- &credits);
- else
- skb = __skb_dequeue(&ep->tx_queue);
- spin_unlock_bh(&htc->tx_lock);
-
- if (!skb)
- break;
-
- ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
- if (ret == -ENOSR)
- break;
- }
-}
-
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ int credits = 0;
+ int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
+ /* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
+ spin_unlock_bh(&htc->tx_lock);
- __skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- spin_unlock_bh(&htc->tx_lock);
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ ret = ath10k_skb_map(htc->ar->dev, skb);
+ if (ret)
+ goto err_credits;
+
+ ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+ skb->len, skb);
+ if (ret)
+ goto err_unmap;
+
return 0;
+
+err_unmap:
+ ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
}
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
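
With the per-endpoint tx queue and send worker removed, ath10k_htc_send() now reserves credits inline; the count is simply the message length divided by the target credit size, rounded up, and the credits are refunded if the HIF send fails. A small sketch of that reserve/refund pattern, using hypothetical names:

    struct my_ep {
            spinlock_t lock;
            int tx_credits;
            int credit_size;
    };

    /* Sketch: reserve tx credits for one message, refund on failure. */
    static int send_with_credits(struct my_ep *ep, struct sk_buff *skb)
    {
            int credits = DIV_ROUND_UP(skb->len, ep->credit_size);
            int ret;

            spin_lock_bh(&ep->lock);
            if (ep->tx_credits < credits) {
                    spin_unlock_bh(&ep->lock);
                    return -EAGAIN;         /* caller retries on credit refill */
            }
            ep->tx_credits -= credits;
            spin_unlock_bh(&ep->lock);

            ret = low_level_send(ep, skb);  /* stand-in for the HIF call */
            if (ret) {
                    spin_lock_bh(&ep->lock);
                    ep->tx_credits += credits;
                    spin_unlock_bh(&ep->lock);
            }
            return ret;
    }
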
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
- /* note: when using TX credit flow, the re-checking of queues happens
- * when credits flow back from the target. in the non-TX credit case,
- * we recheck after the packet completes */
- spin_lock_bh(&htc->tx_lock);
- if (!ep->tx_credit_flow_enabled && !htc->stopped)
- queue_work(ar->workqueue, &ep->send_work);
- spin_unlock_bh(&htc->tx_lock);
-
return 0;
}
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
- struct ath10k_htc_ep *ep)
-{
- struct sk_buff *skb;
- struct ath10k_skb_cb *skb_cb;
-
- spin_lock_bh(&htc->tx_lock);
- for (;;) {
- skb = __skb_dequeue(&ep->tx_queue);
- if (!skb)
- break;
-
- skb_cb = ATH10K_SKB_CB(skb);
- skb_cb->is_aborted = true;
- ath10k_htc_notify_tx_completion(ep, skb);
- }
- spin_unlock_bh(&htc->tx_lock);
-
- cancel_work_sync(&ep->send_work);
-}
-
/***********/
/* Receive */
/***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
- if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
- queue_work(htc->ar->workqueue, &ep->send_work);
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
}
spin_unlock_bh(&htc->tx_lock);
}
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
- skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
- INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC Service %s does not allocate target credits\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
- req_msg = &msg->connect_service;
- req_msg->flags = __cpu_to_le16(flags);
- req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
INIT_COMPLETION(htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -873,19 +760,19 @@ setup:
if (status)
return status;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ATH10K_DBG_HTC,
- "EP %d UL polled: %d, DL polled: %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
- ath10k_dbg(ATH10K_DBG_HTC,
- "HTC service: %s eid: %d TX flow control disabled\n",
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
- int i;
- struct ath10k_htc_ep *ep;
-
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
- for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
- ep = &htc->endpoint[i];
- ath10k_htc_flush_endpoint_tx(htc, ep);
- }
-
ath10k_hif_stop(htc->ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e1dd8c761853..4716d331e6b6 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
- struct sk_buff_head tx_queue;
-
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
-
- struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 39342c5cfcb2..5f7eeebc5432 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,21 +104,16 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt target version %d.%d; host version %d.%d\n",
- htt->target_version_major,
- htt->target_version_minor,
- HTT_CURRENT_VERSION_MAJOR,
- HTT_CURRENT_VERSION_MINOR);
-
- if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
- ath10k_err("htt major versions are incompatible!\n");
+ ath10k_info("htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
return -ENOTSUPP;
}
- if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
- ath10k_warn("htt minor version differ but still compatible\n");
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 318be4629cde..1a337e93b7e9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
+#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
-#define HTT_CURRENT_VERSION_MAJOR 2
-#define HTT_CURRENT_VERSION_MINOR 1
-
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
+ struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e784c40b904b..90d4f74c28d7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
+#include "trace.h"
#include <linux/log2.h>
@@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
- int ret, num_to_fill;
+ int ret, num_deficit, num_to_fill;
+ /* Refilling the whole RX ring buffer proves to be a bad idea. The
+ * reason is that RX may take up a significant number of CPU cycles and starve
+ * other tasks, e.g. TX on an ethernet device while acting as a bridge
+ * with an ath10k wlan interface. This ended up with very poor performance
+ * once the host system's CPU was overwhelmed with RX on ath10k.
+ *
+ * By limiting the number of refills the replenishing occurs
+ * progressively. This in turns makes use of the fact tasklets are
+ * processed in FIFO order. This means actual RX processing can starve
+ * out refilling. If there's not enough buffers on RX ring FW will not
+ * report RX until it is refilled with enough buffers. This
+ * automatically balances load with respect to CPU power.
+ *
+ * This probably comes at a cost of lower maximum throughput but
+ * improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
- num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
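
The replacement code above bounds each refill round: at most ATH10K_HTT_MAX_NUM_REFILL buffers are posted per pass and, if a deficit remains, the replenish tasklet is rescheduled so the RX work queued ahead of it paces the refill. As a generic pattern (illustrative names, not the driver's):

    /* Sketch: do a bounded amount of work per tasklet pass, reschedule the rest. */
    static void replenish_tasklet(unsigned long ptr)
    {
            struct ring *r = (struct ring *)ptr;
            int deficit, n;

            spin_lock_bh(&r->lock);
            deficit = r->fill_level - r->fill_cnt;
            n = min(MAX_REFILL_PER_PASS, deficit);
            fill_ring(r, n);                        /* hypothetical refill helper */
            if (deficit - n > 0)
                    tasklet_schedule(&r->task);     /* finish later, in FIFO order */
            spin_unlock_bh(&r->lock);
    }
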
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+ struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
- struct sk_buff *amsdu;
struct sk_buff *first;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ struct ieee80211_hdr *hdr;
+ u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
- int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- /* FIXME: No idea what assumptions are safe here. Need logs */
- if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
- (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOTSUPP;
- }
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(hdr_buf, hdr, hdr_len);
+ hdr = (struct ieee80211_hdr *)hdr_buf;
- /* A-MSDU max is a little less than 8K */
- amsdu = dev_alloc_skb(8*1024);
- if (!amsdu) {
- ath10k_warn("A-MSDU allocation failed\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- return -ENOMEM;
- }
-
- if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
- int hdrlen;
-
- hdr = (void *)rxd->rx_hdr_status;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
- }
+ /* FIXME: Hopefully this is a temporary measure.
+ *
+ * Reporting individual A-MSDU subframes means each reported frame
+ * shares the same sequence number.
+ *
+ * mac80211 drops frames it recognizes as duplicates, i.e.
+ * retransmission flag is set and sequence number matches sequence
+ * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
+ * "Duplicate detection and recovery")
+ *
+ * To avoid frames being dropped clear retransmission flag for all
+ * received A-MSDUs.
+ *
+ * Worst case: actual duplicate frames will be reported but this should
+ * still be handled gracefully by other OSI/ISO layers. */
+ hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
- int decap_len = 0;
+ int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
- if (skb == first) {
- /* We receive linked A-MSDU subframe skbuffs. The
- * first one contains the original 802.11 header (and
- * possible crypto param) in the RX descriptor. The
- * A-MSDU subframe header follows that. Each part is
- * aligned to 4 byte boundary. */
-
- hdr = (void *)amsdu->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
- decap_hdr += roundup(hdr_len, 4);
- decap_hdr += roundup(crypto_len, 4);
- }
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- /* Ethernet2 decap inserts ethernet header in place of
- * A-MSDU subframe header. */
- skb_pull(skb, 6 + 6 + 2);
-
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
-
- /* Ethernet2 decap also strips the LLC/SNAP so we need
- * to re-insert it. The LLC/SNAP follows A-MSDU
- * subframe header. */
- /* FIXME: Not all LLCs are 8 bytes long */
- decap_len += 8;
-
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+ /* First frame in an A-MSDU chain has more decapped data. */
+ if (skb == first) {
+ len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+ 4);
+ decap_hdr += len;
}
- if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
- /* Native Wifi decap inserts regular 802.11 header
- * in place of A-MSDU subframe header. */
+ switch (fmt) {
+ case RX_MSDU_DECAP_RAW:
+ /* remove trailing FCS */
+ skb_trim(skb, skb->len - FCS_LEN);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ /* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ skb_pull(skb, hdr_len);
- /* A-MSDU subframe header length */
- decap_len += 6 + 6 + 2;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)hdr_buf;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
- }
+ /* the original A-MSDU header has the A-MSDU-present bit set but we're
+ * not including the A-MSDU subframe header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (fmt == RX_MSDU_DECAP_RAW)
- skb_trim(skb, skb->len - 4); /* remove FCS */
+ /* original 802.11 header has a different DA */
+ memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ /* strip ethernet header and insert decapped 802.11
+ * header, amsdu subframe header and rfc1042 header */
- memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+ len = 0;
+ len += sizeof(struct rfc1042_hdr);
+ len += sizeof(struct amsdu_subframe_hdr);
- /* A-MSDU subframes are padded to 4bytes
- * but relative to first subframe, not the whole MPDU */
- if (skb->next && ((decap_len + skb->len) & 3)) {
- int padlen = 4 - ((decap_len + skb->len) & 3);
- memset(skb_put(amsdu, padlen), 0, padlen);
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, len), decap_hdr, len);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* insert decapped 802.11 header, making a single-MSDU
+ * A-MSDU */
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
+ info->skb = skb;
+ info->encrypt_type = enctype;
skb = skb->next;
- }
+ info->skb->next = NULL;
- info->skb = amsdu;
- info->encrypt_type = enctype;
-
- ath10k_htt_rx_free_msdu_chain(first);
+ ath10k_process_rx(htt->ar, info);
+ }
- return 0;
+ /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+ * monitor interface active for sniffing purposes. */
}
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
+ int hdr_len;
+ void *rfc1042;
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
- skb_trim(skb, skb->len - 4);
+ skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* nothing to do here */
+ /* Pull decapped header */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(skb, hdr_len);
+
+ /* Push original header */
+ hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* macaddr[6] + macaddr[6] + ethertype[2] */
- skb_pull(skb, 6 + 6 + 2);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* macaddr[6] + macaddr[6] + len[2] */
- /* we don't need this for non-A-MSDU */
- skb_pull(skb, 6 + 6 + 2);
- break;
- }
+ /* strip ethernet header and insert decapped 802.11 header and
+ * rfc1042 header */
- if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
- void *llc;
- int llclen;
+ rfc1042 = hdr;
+ rfc1042 += roundup(hdr_len, 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
- llclen = 8;
- llc = hdr;
- llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
- llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
- skb_push(skb, llclen);
- memcpy(skb->data, llc, llclen);
- }
+ skb_pull(skb, sizeof(struct ethhdr));
+ memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+ rfc1042, sizeof(struct rfc1042_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ /* remove A-MSDU subframe header and insert
+ * decapped 802.11 header. rfc1042 header is already there */
- if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
- int len = ieee80211_hdrlen(hdr->frame_control);
- skb_push(skb, len);
- memcpy(skb->data, hdr, len);
+ skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+ memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ break;
}
info->skb = skb;
info->encrypt_type = enctype;
- return 0;
+
+ ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
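
For the Ethernet-II decap case, the pushes above rebuild, per delivered subframe, what is assumed to be the standard A-MSDU subframe framing: the saved 802.11 header (with the retry bit cleared, per the comment earlier in the hunk), then the A-MSDU subframe header, then the LLC/SNAP header, then the payload. Sketched as a layout comment, an assumption about intent rather than driver-verified output:

    /*
     * [802.11 hdr, IEEE80211_FCTL_RETRY cleared]
     * [A-MSDU subframe hdr: DA | SA | length]
     * [RFC 1042 / SNAP LLC]
     * [payload]
     */
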
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
- int ret;
- int ip_summed;
memset(&info, 0, sizeof(info));
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
- /* The skb is not yet processed and it may be
- * reallocated. Since the offload is in the original
- * skb extract the checksum now and assign it later */
- ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ret = ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, &info);
else
- ret = ath10k_htt_rx_msdu(htt, &info);
-
- if (ret && !info.fcs_err) {
- ath10k_warn("error processing msdus %d\n", ret);
- dev_kfree_skb_any(info.skb);
- continue;
- }
-
- if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
- ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
- info.skb->ip_summed = ip_summed;
-
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ ath10k_htt_rx_msdu(htt, &info);
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
+ tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_completed(htt, &tx_done);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
- case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 656c2546b294..d9335e9d0d04 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+ ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
- struct sk_buff *txdesc;
+ struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
- txdesc = htt->pending_tx[msdu_id];
- if (!txdesc)
- continue;
-
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
- ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
- ATH10K_SKB_CB(txdesc)->htt.discard = true;
- ath10k_txrx_tx_unref(htt, txdesc);
+ ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = &ar->htt;
-
- if (skb_cb->htt.is_conf) {
- dev_kfree_skb_any(skb);
- return;
- }
-
- if (skb_cb->is_aborted) {
- skb_cb->htt.discard = true;
-
- /* if the skbuff is aborted we need to make sure we'll free up
- * the tx resources, we can't simply run tx_unref() 2 times
- * because if htt tx completion came in earlier we'd access
- * unallocated memory */
- if (skb_cb->htt.refcount > 1)
- skb_cb->htt.refcount = 1;
- }
-
- ath10k_txrx_tx_unref(htt, skb);
+ dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently we support only max 8 bit masks so no need to worry
+ * about endian support */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
+ ath10k_warn("failed to send htt type stats request: %d", ret);
dev_kfree_skb_any(skb);
return ret;
}
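
The 64-bit stats cookie above is simply split into two little-endian 32-bit halves. A worked example with an arbitrary caller-chosen cookie value (illustrative only):

	u64 cookie = 0x1122334455667788ULL;
	__le32 lsb = cpu_to_le32(cookie & 0xffffffff);			/* 0x55667788 */
	__le32 msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);	/* 0x11223344 */
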
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
- ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- txdesc = ath10k_htc_alloc_skb(len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err;
- }
-
spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ goto err_tx_dec;
}
- htt->pending_tx[msdu_id] = txdesc;
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
+ txdesc = ath10k_htc_alloc_skb(len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
+ goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.msdu = msdu;
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
-
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- struct ath10k_skb_cb *skb_cb;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
- struct sk_buff *txfrag = NULL;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+ bool use_frags;
+ u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
u8 tid;
- int prefetch_len, desc_len, frag_len;
- dma_addr_t frags_paddr;
+ int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
- return res;
+ goto err;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ htt->pending_tx[msdu_id] = msdu;
+ spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
- frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
- goto err;
+ goto err_free_msdu_id;
}
- txfrag = dev_alloc_skb(frag_len);
- if (!txfrag) {
- res = -ENOMEM;
- goto err;
- }
+ /* Since HTT 3.0 there is no separate mgmt tx command. However in case
+ * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
+ * fragment list the host driver passes the frame pointer directly. */
+ use_frags = htt->target_version_major < 3 ||
+ !ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
- goto err;
+ goto err_free_txdesc;
}
- spin_lock_bh(&htt->tx_lock);
- msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
- if (msdu_id < 0) {
- spin_unlock_bh(&htt->tx_lock);
- res = msdu_id;
- goto err;
+ if (use_frags) {
+ skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+ skb_cb->htt.pad_len = (unsigned long)msdu->data -
+ round_down((unsigned long)msdu->data, 4);
+
+ skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ } else {
+ skb_cb->htt.frag_len = 0;
+ skb_cb->htt.pad_len = 0;
}
- htt->pending_tx[msdu_id] = txdesc;
- spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
- goto err;
-
- /* tx fragment list must be terminated with zero-entry */
- skb_put(txfrag, frag_len);
- tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
- tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
- tx_frags[0].len = __cpu_to_le32(msdu->len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- res = ath10k_skb_map(dev, txfrag);
- if (res)
- goto err;
+ goto err_pull_txfrag;
+
+ if (use_frags) {
+ dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ /* tx fragment list must be terminated with zero-entry */
+ tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+ tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+ skb_cb->htt.frag_len +
+ skb_cb->htt.pad_len);
+ tx_frags[0].len = __cpu_to_le32(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
+ tx_frags[1].paddr = __cpu_to_le32(0);
+ tx_frags[1].len = __cpu_to_le32(0);
+
+ dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
- ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+ ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
- txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
- memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ if (use_frags)
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ else
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len);
+ cmd->data_tx.len = __cpu_to_le16(msdu->len -
+ skb_cb->htt.frag_len -
+ skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
- /* refcount is decremented by HTC and HTT completions until it reaches
- * zero and is freed */
- skb_cb = ATH10K_SKB_CB(txdesc);
- skb_cb->htt.msdu_id = msdu_id;
- skb_cb->htt.refcount = 2;
- skb_cb->htt.txfrag = txfrag;
- skb_cb->htt.msdu = msdu;
+ memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
- goto err;
+ goto err_unmap_msdu;
return 0;
-err:
- if (txfrag)
- ath10k_skb_unmap(dev, txfrag);
- if (txdesc)
- dev_kfree_skb_any(txdesc);
- if (txfrag)
- dev_kfree_skb_any(txfrag);
- if (msdu_id >= 0) {
- spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
- }
- ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ htt->pending_tx[msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
return res;
}
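
To recap the tx-path change above: the separately allocated txfrag skb is gone; when use_frags is true the two-entry fragment list is carved out of headroom pushed onto the msdu itself (htt.frag_len plus htt.pad_len), and cmd->data_tx.frags_paddr now points at skb_cb->paddr, i.e. the start of that pushed area. A rough sketch of the resulting DMA buffer layout, under the same assumptions as the code above:

	/*
	 * skb_cb->paddr -> [ tx_frags[0] | tx_frags[1] = {0, 0} | pad | 802.11 frame ]
	 *                  |<--------- htt.frag_len --------->|<- pad_len ->|
	 *
	 * tx_frags[0].paddr = skb_cb->paddr + frag_len + pad_len   (the frame itself)
	 * tx_frags[0].len   = msdu->len     - frag_len - pad_len
	 */
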
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 44ed5af0a204..8aeb46d9b534 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,28 +20,37 @@
#include "targaddrs.h"
-/* Supported FW version */
-#define SUPPORTED_FW_MAJOR 1
-#define SUPPORTED_FW_MINOR 0
-#define SUPPORTED_FW_RELEASE 0
-#define SUPPORTED_FW_BUILD 629
-
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION 0x4000002c
-#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+#define ATH10K_FW_API2_FILE "firmware-2.bin"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+};
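
The new firmware-2.bin image is a TLV container: the ATH10K_FIRMWARE_MAGIC string (null byte included) followed by a sequence of ath10k_fw_ie entries. A minimal parsing sketch, assuming the magic string leads the image and that entry data is padded to a 4-byte boundary (the real loader may differ):

	static void example_walk_fw_ies(const u8 *data, size_t len)
	{
		size_t magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
		const u8 *p = data + magic_len;
		const u8 *end = data + len;

		while (p + sizeof(struct ath10k_fw_ie) <= end) {
			const struct ath10k_fw_ie *ie = (const void *)p;
			u32 ie_id = le32_to_cpu(ie->id);
			u32 ie_len = le32_to_cpu(ie->len);

			if (ie->data + ie_len > end)
				break;

			switch (ie_id) {
			case ATH10K_FW_IE_FW_IMAGE:
				/* firmware image is at ie->data, ie_len bytes */
				break;
			default:
				break;
			}

			p = ie->data + ALIGN(ie_len, 4);
		}
	}
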
+
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
@@ -53,6 +62,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
@@ -60,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
+/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
@@ -75,7 +88,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
-#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@@ -90,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS 256
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
@@ -169,6 +216,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_CHIP_ID_ADDRESS 0x000000ec
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
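
The new chip-id register fields are consumed with the driver's usual mask/shift pattern. For illustration only (register read elided), decoding the revision from a raw value:

	u32 chip_id = 0x00000200;	/* example value read from SOC_CHIP_ID_ADDRESS */
	u32 rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;
	/* rev == 0x2, i.e. QCA988X_HW_2_0_CHIP_ID_REV */
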
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index cf2ba4d850c9..0b1cc516e778 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
ATH10K_RTS_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_RTS_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
if (value != 0xFFFFFFFF)
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
- return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
- value);
+ vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -460,6 +464,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -503,13 +512,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
struct wmi_vdev_start_request_arg arg = {};
- enum nl80211_channel_type type;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -560,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
- /* For some reasons, ath10k_wmi_vdev_down() here couse
- * often ath10k_wmi_vdev_stop() to fail. Next we could
- * not run monitor vdev and driver reload
- * required. Don't see such problems we skip
- * ath10k_wmi_vdev_down() here.
- */
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn("Monitor vdev down failed: %d\n", ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
@@ -607,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
@@ -639,7 +642,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
@@ -668,13 +671,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info,
const u8 self_peer[ETH_ALEN])
{
+ u32 vdev_param;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -708,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
- ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ATIM_WINDOW,
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -719,47 +723,45 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
/*
* Review this when mac80211 gains per-interface powersave support.
*/
-static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
- struct ath10k_generic_iter *ar_iter = data;
- struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
if (conf->flags & IEEE80211_CONF_PS) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
- ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
- arvif->vdev_id,
- param,
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
arvif->vdev_id);
- return;
+ return ret;
}
-
- ar_iter->ret = ret;
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
- ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
- psmode);
- if (ar_iter->ret)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ return ret;
+ }
+
+ return 0;
}
/**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
- ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
- ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1028,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 ampdu_factor;
if (!vht_cap->vht_supported)
return;
arg->peer_flags |= WMI_PEER_VHT;
-
arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller. */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= WMI_PEER_80MHZ;
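
A quick arithmetic check of the A-MPDU workaround above, with IEEE80211_HT_MAX_AMPDU_FACTOR being 13:

	/* ampdu_factor = 7 -> (1 << (13 + 7)) - 1 = 1048575 bytes
	 * ampdu_factor = 0 -> (1 << (13 + 0)) - 1 = 8191 bytes (buggy AP case)
	 * max() against the HT-derived peer_max_mpdu keeps the larger value. */
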
@@ -1048,7 +1064,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1093,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- /* FIXME: add VHT */
-
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1106,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
- if (sta->ht_cap.ht_supported) {
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AC_VHT80;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
@@ -1105,30 +1130,32 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
-static int ath10k_peer_assoc(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf)
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
{
- struct wmi_peer_assoc_complete_arg arg;
-
lockdep_assert_held(&ar->conf_mutex);
- memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+ memset(arg, 0, sizeof(*arg));
- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
- ath10k_peer_assoc_h_rates(ar, sta, &arg);
- ath10k_peer_assoc_h_ht(ar, sta, &arg);
- ath10k_peer_assoc_h_vht(ar, sta, &arg);
- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
- ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+ ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+ ath10k_peer_assoc_h_rates(ar, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+ ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
- return ath10k_wmi_peer_assoc(ar, &arg);
+ return 0;
}
/* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1138,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1153,24 +1181,33 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+ bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+ ath10k_warn("Peer assoc prepare failed for %pM: %d\n",
+ bss_conf->bssid, ret);
rcu_read_unlock();
return;
}
rcu_read_unlock();
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn("Peer assoc failed for %pM: %d\n",
+ bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d associated, BSSID: %pM, AID: %d\n",
- arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
@@ -1191,10 +1228,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n",
+ arvif->vdev_id);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
- arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,26 +1241,33 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
- ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret)
- ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
- arvif->vdev_id, ret);
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
- ath10k_wmi_flush_tx(ar);
+ /* FIXME: why don't we print error if wmi call fails? */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- arvif->def_wep_key_index = 0;
+ arvif->def_wep_key_idx = 0;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
struct ieee80211_sta *sta)
{
+ struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+ ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+ if (ret) {
+ ath10k_warn("WMI peer assoc prepare failed for %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+ ath10k_warn("Peer assoc failed for STA %pM: %d\n",
+ sta->addr, ret);
return ret;
}
@@ -1333,8 +1378,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
- "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
- __func__, ch - arg.channels, arg.n_channels,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
@@ -1391,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+ struct ieee80211_tx_info *info)
+{
+ if (info->control.vif)
+ return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
+
+ if (ar->monitor_enabled)
+ return ar->monitor_vdev_id;
+
+ ath10k_warn("could not resolve vdev id\n");
+ return 0;
+}
+
/*
* Frames sent to the FW have to be in "Native Wifi" format.
* Strip the QoS field from the 802.11 header.
@@ -1411,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
+static void ath10k_tx_wep_key_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ wep_key_work);
+ int ret, keyidx = arvif->def_wep_key_newidx;
+
+ if (arvif->def_wep_key_idx == keyidx)
+ return;
+
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+ if (ret) {
+ ath10k_warn("could not update wep keyidx (%d)\n", ret);
+ return;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+}
+
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1419,11 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
- int ret;
-
- /* TODO AP mode should be implemented */
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
if (!ieee80211_has_protected(hdr->frame_control))
return;
@@ -1435,20 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
key->cipher != WLAN_CIPHER_SUITE_WEP104)
return;
- if (key->keyidx == arvif->def_wep_key_index)
+ if (key->keyidx == arvif->def_wep_key_idx)
return;
- ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
-
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DEF_KEYID,
- key->keyidx);
- if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
- return;
- }
-
- arvif->def_wep_key_index = key->keyidx;
+ /* FIXME: Most likely a few frames will be TXed with an old key. Simply
+ * queueing frames until key index is updated is not an option because
+ * sk_buff may need more processing to be done, e.g. offchannel */
+ arvif->def_wep_key_newidx = key->keyidx;
+ ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
}
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1478,19 +1563,42 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int ret;
+ int ret = 0;
- if (ieee80211_is_mgmt(hdr->frame_control))
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else if (ieee80211_is_nullfunc(hdr->frame_control))
+ if (ar->htt.target_version_major >= 3) {
+ /* Since HTT 3.0 there is no separate mgmt tx command */
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ goto exit;
+ }
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features)) {
+ if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+ ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn("wmi mgmt_tx queue limit reached\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ } else {
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+ }
+ } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features) &&
+ ieee80211_is_nullfunc(hdr->frame_control)) {
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends on
- * the tx status to be correct. */
+ * those frames when it detects link/beacon loss and depends
+ * on the tx status to be correct. */
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- else
+ } else {
ret = ath10k_htt_tx(&ar->htt, skb);
+ }
+exit:
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -1534,18 +1642,19 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
- vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+ vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
if (peer)
+ /* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
@@ -1580,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
}
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret)
+ ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ }
+}
+
/************/
/* Scanning */
/************/
@@ -1643,8 +1782,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1815,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- /* make sure we submit the command so the completion
- * timeout makes sense */
- ath10k_wmi_flush_tx(ar);
-
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
@@ -1709,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = NULL;
- u32 vdev_id = 0;
- u8 tid;
-
- if (info->control.vif) {
- arvif = ath10k_vif_to_arvif(info->control.vif);
- vdev_id = arvif->vdev_id;
- } else if (ar->monitor_enabled) {
- vdev_id = ar->monitor_vdev_id;
- }
+ u8 tid, vdev_id;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1726,12 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
- tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- if (ieee80211_is_data_qos(hdr->frame_control) &&
- is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- }
+ tid = ath10k_tx_h_get_tid(hdr);
+ vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
/* it makes no sense to process injected frames like that */
if (info->control.vif &&
@@ -1742,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
- memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
- ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.tid = tid;
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
ATH10K_SKB_CB(skb)->htt.is_offchan = true;
- ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+ ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1771,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
@@ -1817,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
else if (ar->state == ATH10K_STATE_RESTARTING)
ar->state = ATH10K_STATE_RESTARTED;
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
- ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
@@ -1847,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
-static void ath10k_config_ps(struct ath10k *ar)
+static int ath10k_config_ps(struct ath10k *ar)
{
- struct ath10k_generic_iter ar_iter;
+ struct ath10k_vif *arvif;
+ int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
-
- ieee80211_iterate_active_interfaces_atomic(
- ar->hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_ps_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn("could not setup powersave (%d)\n", ret);
+ break;
+ }
+ }
- if (ar_iter.ret)
- ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+ return ret;
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1884,7 +2002,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +2019,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
- ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1922,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
+ u32 vdev_param;
mutex_lock(&ar->conf_mutex);
@@ -1930,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->ar = ar;
arvif->vif = vif;
+ INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
ret = -EBUSY;
- goto exit;
+ goto err;
}
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
- goto exit;
+ goto err;
}
arvif->vdev_id = bit - 1;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
- ar->free_vdev_map &= ~(1 << arvif->vdev_id);
if (ar->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1973,32 +2092,41 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
- goto exit;
+ goto err;
}
- ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
- arvif->def_wep_key_index);
- if (ret)
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_add(&arvif->list, &ar->arvifs);
+
+ vdev_param = ar->wmi.vdev_param->def_keyid;
+ ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+ arvif->def_wep_key_idx);
+ if (ret) {
ath10k_warn("Failed to set default keyid: %d\n", ret);
+ goto err_vdev_delete;
+ }
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
ATH10K_HW_TXRX_NATIVE_WIFI);
- if (ret)
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
ath10k_warn("Failed to set TX encap: %d\n", ret);
+ goto err_vdev_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
ath10k_warn("Failed to create peer for AP: %d\n", ret);
- goto exit;
+ goto err_vdev_delete;
}
}
@@ -2007,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+ goto err_peer_delete;
+ }
param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
- if (ret)
+ if (ret) {
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ goto err_peer_delete;
+ }
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
- if (ret)
+ if (ret) {
ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
-exit:
mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+ list_del(&arvif->list);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -2052,9 +2203,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+ cancel_work_sync(&arvif->wep_key_work);
+
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+ }
+ spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
+ list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2064,6 +2223,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ arvif->vdev_id);
+
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2267,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+ ar->monitor_vdev_id);
+
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
- else
- ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
@@ -2130,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
+ u32 vdev_param, pdev_param;
mutex_lock(&ar->conf_mutex);
@@ -2138,44 +2303,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BEACON_INTERVAL,
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->beacon_interval);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Beacon interval: %d set for VDEV: %d\n",
- arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
- ret = ath10k_wmi_pdev_set_param(ar,
- WMI_PDEV_PARAM_BEACON_TX_MODE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set staggered beacon mode for VDEV: %d\n",
- arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_DTIM_PERIOD,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set dtim period: %d for VDEV: %d\n",
- arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2353,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d create peer %pM\n",
+ arvif->vdev_id, info->bssid);
+
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- info->bssid, arvif->vdev_id);
-
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@@ -2207,11 +2371,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start %pM\n",
+ arvif->vdev_id, info->bssid);
+
+ /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
- if (!ret)
- ath10k_dbg(ATH10K_DBG_MAC,
- "VDEV: %d started with BSSID: %pM\n",
- arvif->vdev_id, info->bssid);
}
/*
@@ -2235,16 +2400,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, cts_prot);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set CTS prot: %d for VDEV: %d\n",
- cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2419,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_SLOT_TIME,
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set slottime: %d for VDEV: %d\n",
- slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2437,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_PREAMBLE,
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d preamble %d\n",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set preamble: %d for VDEV: %d\n",
- preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2474,27 +2637,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta)\n",
+ arvif->vdev_id, sta->addr);
+
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2667,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2681,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
+ ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -2732,88 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
* Both RTS and Fragmentation threshold are interface-specific
* in ath10k, but device-specific in mac80211.
*/
-static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
-
- ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
- if (ar_iter->ret)
- ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set RTS threshold: %d for VDEV: %d\n",
- rts, arvif->vdev_id);
-}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_rts_iter, &ar_iter);
- mutex_unlock(&ar->conf_mutex);
-
- return ar_iter.ret;
-}
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
-static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath10k_generic_iter *ar_iter = data;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-
- lockdep_assert_held(&arvif->ar->conf_mutex);
-
- /* During HW reconfiguration mac80211 reports all interfaces that were
- * running until reconfiguration was started. Since FW doesn't have any
- * vdevs at this point we must not iterate over this interface list.
- * This setting will be updated upon add_interface(). */
- if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
- return;
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
- ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
- if (ar_iter->ret)
- ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
- arvif->vdev_id);
- else
- ath10k_dbg(ATH10K_DBG_MAC,
- "Set frag threshold: %d for VDEV: %d\n",
- frag, arvif->vdev_id);
+ return ret;
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
-
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
+ struct ath10k_vif *arvif;
+ int ret = 0;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(
- hw, IEEE80211_IFACE_ITER_NORMAL,
- ath10k_set_frag_iter, &ar_iter);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_frag(arvif, value);
+ if (ret) {
+ ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
mutex_unlock(&ar->conf_mutex);
- return ar_iter.ret;
+ return ret;
}
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
@@ -2836,8 +2959,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
- empty = bitmap_empty(ar->htt.used_msdu_ids,
- ar->htt.max_num_pending_tx);
+ empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3448,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
+ /* MSDU can have HTT TX fragment pushed in front. The additional 4
+ * bytes are used for padding/alignment if necessary. */
+ ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6fce9bfb19a5..ba1021997b8f 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1bd..f8d59c7b9082 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
-#define QCA988X_1_0_DEVICE_ID (0xabcd)
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
- { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{0}
};
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
- /* could be moved to share CE3 */
- /* target->host HTT + HTC control */
- { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
- /* target->host WMI */
- { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
- /* host->target WMI */
- { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
- /* host->target HTT */
- { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
- CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
- /* unused */
- { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* Target autonomous hif_memcpy */
- { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
- /* ce_diag, the Diagnostic Window */
- { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: unused */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* host->target HTC control and raw streams */
- { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
- /* target->host HTT + HTC control */
- { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
- /* target->host WMI */
- { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target WMI */
- { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* host->target HTT */
- { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = 0,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = 1,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 512,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = 2,
+ .pipedir = PIPEDIR_IN,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = 3,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = 4,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 256,
+ .nbytes_max = 256,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* NB: 50% of src nentries, since tx has 2 frags */
- /* unused */
- { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
- /* Reserved for target autonomous hif_memcpy */
- { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+ /* CE5: unused */
+ {
+ .pipenum = 5,
+ .pipedir = PIPEDIR_OUT,
+ .nentries = 32,
+ .nbytes_max = 2048,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = 6,
+ .pipedir = PIPEDIR_INOUT,
+ .nentries = 32,
+ .nbytes_max = 4096,
+ .flags = CE_ATTR_FLAGS,
+ .reserved = 0,
+ },
+
/* CE7 used only by Host */
};
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
ath10k_warn("Unable to wakeup target\n");
}
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
atomic_inc(&ar_pci->keep_awake_count);
if (ar_pci->verified_awake)
- return;
+ return 0;
for (;;) {
if (ath10k_pci_target_is_awake(ar)) {
ar_pci->verified_awake = true;
- break;
+ return 0;
}
if (tot_delay > PCIE_WAKE_TIMEOUT) {
- ath10k_warn("target takes too long to wake up (awake count %d)\n",
+ ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+ PCIE_WAKE_TIMEOUT,
atomic_read(&ar_pci->keep_awake_count));
- break;
+ return -ETIMEDOUT;
}
udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
* FIXME: Handle OOM properly.
*/
static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
@@ -511,39 +612,28 @@ exit:
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
- bool process = false;
-
- do {
- /*
- * For the send completion of an item in sendlist, just
- * increment num_sends_allowed. The upper layer callback will
- * be triggered when last fragment is done with send.
- */
- if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
- spin_lock_bh(&pipe_info->pipe_lock);
- pipe_info->num_sends_allowed++;
- spin_unlock_bh(&pipe_info->pipe_lock);
- continue;
- }
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+ &ce_data, &nbytes,
+ &transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+ compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- process = true;
- } while (ath10k_ce_completed_send_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0);
-
- /*
- * If only some of the items within a sendlist have completed,
- * don't invoke completion processing until the entire sendlist
- * has been sent.
- */
- if (!process)
- return;
+ }
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
- void *transfer_context, u32 ce_data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
- do {
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
- compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+ compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
- compl->transfer_context = transfer_context;
+ compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
-
- } while (ath10k_ce_completed_recv_next(ce_state,
- &transfer_context,
- &ce_data, &nbytes,
- &transfer_id,
- &flags) == 0);
+ }
ath10k_pci_process_ce(ar);
}
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ce_state *ce_hdl = pipe_info->ce_hdl;
- struct ce_sendlist sendlist;
+ struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+ struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
- memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
len = min(bytes, nbuf->len);
bytes -= len;
@@ -648,19 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
- ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
- /* Make sure we have resources to handle this request */
- spin_lock_bh(&pipe_info->pipe_lock);
- if (!pipe_info->num_sends_allowed) {
- ath10k_warn("Pipe: %d is full\n", pipe_id);
- spin_unlock_bh(&pipe_info->pipe_lock);
- return -ENOSR;
- }
- pipe_info->num_sends_allowed--;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+ ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+ flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
@@ -670,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
- int ret;
-
- spin_lock_bh(&pipe_info->pipe_lock);
- ret = pipe_info->num_sends_allowed;
- spin_unlock_bh(&pipe_info->pipe_lock);
-
- return ret;
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -764,9 +818,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_diag = ar_pci->ce_diag;
+ struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions, disable_interrupts;
@@ -792,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
ath10k_pci_ce_send_done,
disable_interrupts);
completions += attr->src_nentries;
- pipe_info->num_sends_allowed = attr->src_nentries - 1;
}
if (attr->dest_nentries) {
@@ -805,15 +858,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
continue;
for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(struct ath10k_pci_compl),
- GFP_KERNEL);
+ compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_stop_ce(ar);
return -ENOMEM;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
@@ -840,7 +892,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +902,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
@@ -861,7 +913,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
- netbuf = (struct sk_buff *)compl->transfer_context;
+ netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
@@ -912,12 +964,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
- if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+ switch (compl->state) {
+ case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
- compl->transfer_context,
+ compl->skb,
compl->transfer_id);
send_done = 1;
- } else {
+ break;
+ case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +979,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
break;
}
- skb = (struct sk_buff *)compl->transfer_context;
+ skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +998,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
nbytes,
skb->len + skb_tailroom(skb));
}
+ break;
+ case ATH10K_PCI_COMPL_FREE:
+ ath10k_warn("free completion cannot be processed\n");
+ break;
+ default:
+ ath10k_warn("invalid completion state (%d)\n",
+ compl->state);
+ break;
}
- compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+ compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
*/
spin_lock_bh(&compl->pipe_info->pipe_lock);
list_add_tail(&compl->list, &compl->pipe_info->compl_free);
- compl->pipe_info->num_sends_allowed += send_done;
spin_unlock_bh(&compl->pipe_info->pipe_lock);
}
@@ -1037,12 +1098,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num)
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
@@ -1097,7 +1158,7 @@ err:
static int ath10k_pci_post_rx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret = 0;
@@ -1147,11 +1208,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
@@ -1179,11 +1240,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
@@ -1206,15 +1267,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT)
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ /*
+ * Indicate the completion to the higher layer to free
+ * the buffer
+ */
+ ATH10K_SKB_CB(netbuf)->is_aborted = true;
+ ar_pci->msg_callbacks_current.tx_completion(ar,
+ netbuf,
+ id);
}
}
@@ -1232,7 +1292,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1303,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1353,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
- struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
@@ -1378,13 +1440,16 @@ err_dma:
return ret;
}
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id))
+ return;
if (xfer->wait_for_resp)
return;
@@ -1392,14 +1457,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
complete(&xfer->done);
}
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
- void *transfer_context,
- u32 data,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
- struct bmi_xfer *xfer = transfer_context;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id, &flags))
+ return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1747,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct hif_ce_pipe_info *pipe_info;
+ struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
@@ -1895,7 +1963,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
- struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+ struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
struct ath10k_pci *ar_pci = pipe->ar_pci;
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2280,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *mem = ar_pci->mem;
int i;
u32 val;
if (!SOC_GLOBAL_RESET_ADDRESS)
return;
- if (!mem)
- return;
-
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2295,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
}
/* Put Target, including PCIe, into RESET. */
- val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
@@ -2245,16 +2308,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
- ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
- if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+ if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
- ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2330,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
- ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
- break;
- case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
- ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
@@ -2286,7 +2346,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- u32 lcr_val;
+ u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
@@ -2298,9 +2358,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
switch (pci_dev->device) {
- case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
- break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
break;
@@ -2322,10 +2379,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- /* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
- spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2448,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&ar_pci->ce_lock);
- ar_pci->cacheline_sz = dma_get_cache_alignment();
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("Failed to get chip id: %d\n", ret);
+ return ret;
+ }
+
+ chip_id = ath10k_pci_read32(ar,
+ RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+ ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
- ret = ath10k_core_register(ar);
+ ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_iomap;
@@ -2414,7 +2478,6 @@ err_region:
err_device:
pci_disable_device(pdev);
err_ar:
- pci_set_drvdata(pdev, NULL);
ath10k_core_destroy(ar);
err_ar_pci:
/* call HIF PCI free here */
@@ -2442,7 +2505,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
pci_clear_master(pdev);
@@ -2483,9 +2545,6 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 871bb339d56d..52fb7b973571 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
+enum ath10k_pci_compl_state {
+ ATH10K_PCI_COMPL_FREE = 0,
+ ATH10K_PCI_COMPL_SEND,
+ ATH10K_PCI_COMPL_RECV,
+};
+
struct ath10k_pci_compl {
struct list_head list;
- int send_or_recv;
- struct ce_state *ce_state;
- struct hif_ce_pipe_info *pipe_info;
- void *transfer_context;
+ enum ath10k_pci_compl_state state;
+ struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci_pipe *pipe_info;
+ struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
/*
* PCI-specific Target state
*
@@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
- ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
@@ -178,9 +178,6 @@ struct hif_ce_pipe_info {
/* List of free CE completion slots */
struct list_head compl_free;
- /* Limit the number of outstanding send requests. */
- int num_sends_allowed;
-
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@@ -190,7 +187,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
- int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
@@ -219,7 +215,7 @@ struct ath10k_pci {
bool compl_processing;
- struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
@@ -227,16 +223,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
- struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
- /* makes sure that dummy reads are atomic */
- spinlock_t hw_v1_workaround_lock;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +237,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
- return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
- iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +307,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *addr = ar_pci->mem;
-
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
- unsigned long irq_flags;
- spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
- ioread32(addr+offset+4); /* 3rd read prior to write */
- ioread32(addr+offset+4); /* 2nd read prior to write */
- ioread32(addr+offset+4); /* 1st read prior to write */
- iowrite32(value, addr+offset);
-
- spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
- irq_flags);
- } else {
- iowrite32(value, addr+offset);
- }
+ iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +318,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_wake(ar);
+ return ath10k_do_pci_wake(ar);
+
+ return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bfec6c8f2ecb..1c584c4b019c 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
- RX_MSDU_DECAP_RAW = 0,
- RX_MSDU_DECAP_NATIVE_WIFI = 1,
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header. */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 85e806bf7257..90817ddc92ba 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(int id, void *buf, size_t buf_len),
+ TP_PROTO(int id, void *buf, size_t buf_len, int ret),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
+ __field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
+ __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id %d len %zu",
+ "id %d len %zu ret %d",
__entry->id,
- __entry->buf_len
+ __entry->buf_len,
+ __entry->ret
)
);
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "len %zu",
+ __entry->buf_len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 68b6faefd1d8..5ae373a1e294 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,40 +44,39 @@ out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
- struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
- struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *msdu;
int ret;
- if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
- return;
-
- ATH10K_SKB_CB(txdesc)->htt.refcount--;
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
- if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
return;
-
- if (txfrag) {
- ret = ath10k_skb_unmap(dev, txfrag);
- if (ret)
- ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
- dev_kfree_skb_any(txfrag);
}
+ msdu = htt->pending_tx[tx_done->msdu_id];
+ skb_cb = ATH10K_SKB_CB(msdu);
+
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
+ if (skb_cb->htt.frag_len)
+ skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
- memset(&info->status, 0, sizeof(info->status));
- if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+ if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+ if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
- htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
- ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+ htt->pending_tx[tx_done->msdu_id] = NULL;
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
- if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+ if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
-
- dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
-{
- struct sk_buff *txdesc;
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
-
- if (tx_done->msdu_id >= htt->max_num_pending_tx) {
- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
- tx_done->msdu_id);
- return;
- }
-
- txdesc = htt->pending_tx[tx_done->msdu_id];
-
- ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
- ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
-
- ath10k_txrx_tx_unref(htt, txdesc);
}
static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e78632a76df7..356dc9c04c9e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,9 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 55f90c761868..ccf3597fd9e2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -23,29 +23,470 @@
#include "wmi.h"
#include "mac.h"
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- if (ar->state == ATH10K_STATE_WEDGED) {
- ath10k_warn("wmi flush skipped - device is wedged anyway\n");
- return;
- }
-
- ret = wait_event_timeout(ar->wmi.wq,
- atomic_read(&ar->wmi.pending_tx_count) == 0,
- 5*HZ);
- if (atomic_read(&ar->wmi.pending_tx_count) == 0)
- return;
-
- if (ret == 0)
- ret = -ETIMEDOUT;
-
- if (ret < 0)
- ath10k_warn("wmi flush failed (%d)\n", ret);
-}
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+};
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
@@ -85,18 +526,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
-
- if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
- wake_up(&ar->wmi.wq);
}
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
- enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
- int status;
+ int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +544,146 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
- if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
- WMI_MAX_PENDING_TX_COUNT) {
- /* avoid using up memory when FW hangs */
- atomic_dec(&ar->wmi.pending_tx_count);
- return -EBUSY;
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+ trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
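+/* Send the vif's pending beacon via the no-wait WMI path. On success the
+ * beacon skb is freed; on failure it is kept for a retry once tx credits
+ * are replenished. */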
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct wmi_bcn_tx_arg arg = {0};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->data_lock);
+
+ if (arvif->beacon == NULL)
+ return;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.tx_rate = 0;
+ arg.tx_power = 0;
+ arg.bcn = arvif->beacon->data;
+ arg.bcn_len = arvif->beacon->len;
+
+ ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ if (ret)
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
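+/* Flush beacons queued by SWBA events on all active interfaces. */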
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn("wmi command %d is not supported by firmware\n",
+ cmd_id);
+ return ret;
}
- memset(skb_cb, 0, sizeof(*skb_cb));
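+ /* Block until HTC tx credits are available again; the condition is
+ * re-evaluated on every credit refill and gives up after 3 seconds. */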
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
- trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+ (ret != -EAGAIN);
+ }), 3*HZ);
- status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- if (status) {
+ if (ret)
dev_kfree_skb_any(skb);
- atomic_dec(&ar->wmi.pending_tx_count);
- return status;
+
+ return ret;
+}
+
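+/* Wrap an 802.11 management frame into a WMI management tx command. The
+ * original skb is acked back to mac80211 immediately since real tx status
+ * reporting is not implemented yet. */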
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *wmi_skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int len;
+ u16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr) + skb->len;
+ len = round_up(len, 4);
+
+ wmi_skb = ath10k_wmi_alloc_skb(len);
+ if (!wmi_skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+
+ memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(cmd->buf, skb->data, skb->len);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+
+ /* Send the management frame buffer to the target */
+ ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
}
- return 0;
+ /* TODO: report tx status to mac80211 - temporary just ACK */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+
+ return ret;
}
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +873,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@@ -325,13 +885,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
+ int pull_len;
+
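+ /* Firmware with the extended mgmt rx feature prepends a v2 event
+ * header; both variants embed the same v1 header fields. */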
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
- channel = __le32_to_cpu(event->hdr.channel);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- rx_status = __le32_to_cpu(event->hdr.status);
- snr = __le32_to_cpu(event->hdr.snr);
- phy_mode = __le32_to_cpu(event->hdr.phy_mode);
- rate = __le32_to_cpu(event->hdr.rate);
+ channel = __le32_to_cpu(ev_hdr->channel);
+ buf_len = __le32_to_cpu(ev_hdr->buf_len);
+ rx_status = __le32_to_cpu(ev_hdr->status);
+ snr = __le32_to_cpu(ev_hdr->snr);
+ phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
+ rate = __le32_to_cpu(ev_hdr->rate);
memset(status, 0, sizeof(*status));
@@ -358,7 +929,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
- skb_pull(skb, sizeof(event->hdr));
+ skb_pull(skb, pull_len);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +1305,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
- struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
- int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -794,17 +1363,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = bcn->data;
- arg.bcn_len = bcn->len;
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->beacon) {
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+ dev_kfree_skb_any(arvif->beacon);
+ }
- ret = ath10k_wmi_beacon_send(ar, &arg);
- if (ret)
- ath10k_warn("could not send beacon (%d)\n", ret);
+ arvif->beacon = bcn;
- dev_kfree_skb_any(bcn);
+ ath10k_wmi_tx_beacon_nowait(arvif);
+ spin_unlock_bh(&ar->data_lock);
}
}
@@ -919,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
+static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
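+/* Allocate a DMA-coherent pool for one firmware host memory request and
+ * record it so it can be advertised in the WMI init command and freed on
+ * detach. */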
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+
+ pool_size = num_units * round_up(unit_len, 4);
+
+ if (!pool_size)
+ return -EINVAL;
+
+ ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+ pool_size,
+ &paddr,
+ GFP_ATOMIC);
+ if (!ar->wmi.mem_chunks[idx].vaddr) {
+ ath10k_warn("failed to allocate memory chunk\n");
+ return -ENOMEM;
+ }
+
+ memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return 0;
+}
+
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -943,6 +1561,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ /* only manually set fw features when not using FW IE format */
+ if (ar->fw_api == 1 && ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -987,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
complete(&ar->wmi.service_ready);
}
+static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+ struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ skb->len, sizeof(*ev));
+ return;
+ }
+
+ ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+ ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->fw_version_major =
+ (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+ ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
+
+ ar->ath_common.regulatory.current_rd =
+ __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+ ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+ sizeof(ev->wmi_service_bitmap));
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor);
+ }
+
+ num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+
+ if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+ ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+ if (!num_mem_reqs)
+ goto exit;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+ num_mem_reqs);
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+ num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+ unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+ num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+ /* number of units to allocate is number of
+ * peers, 1 extra for self peer on target.
+ * this value must be kept in sync with the
+ * target or host and target can disagree */
+ num_units = TARGET_10X_NUM_PEERS + 1;
+ else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+ num_units = TARGET_10X_NUM_VDEVS + 1;
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(ev->mem_reqs[i].num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+exit:
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+ __le32_to_cpu(ev->sw_version),
+ __le32_to_cpu(ev->abi_version),
+ __le32_to_cpu(ev->phy_capability),
+ __le32_to_cpu(ev->ht_cap_info),
+ __le32_to_cpu(ev->vht_cap_info),
+ __le32_to_cpu(ev->vht_supp_mcs),
+ __le32_to_cpu(ev->sys_cap_info),
+ __le32_to_cpu(ev->num_mem_reqs),
+ __le32_to_cpu(ev->num_rf_chains));
+
+ complete(&ar->wmi.service_ready);
+}
+
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1007,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@@ -1126,64 +1850,158 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
-static void ath10k_wmi_event_work(struct work_struct *work)
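+/* Event dispatcher for 10.x firmware; the event IDs differ from the main
+ * firmware branch. */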
+static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k *ar = container_of(work, struct ath10k,
- wmi.wmi_event_work);
- struct sk_buff *skb;
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ u16 len;
- for (;;) {
- skb = skb_dequeue(&ar->wmi.wmi_event_list);
- if (!skb)
- break;
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
- ath10k_wmi_event_process(ar, skb);
- }
-}
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
- struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
- enum wmi_event_id event_id;
+ len = skb->len;
- event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+ trace_ath10k_wmi_event(id, skb->data, skb->len);
- /* some events require to be handled ASAP
- * thus can't be defered to a worker thread */
- switch (event_id) {
- case WMI_HOST_SWBA_EVENTID:
- case WMI_MGMT_RX_EVENTID:
- ath10k_wmi_event_process(ar, skb);
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ break;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_ready_event_rx(ar, skb);
+ break;
default:
+ ath10k_warn("Unknown eventid: %d\n", id);
break;
}
- skb_queue_tail(&ar->wmi.wmi_event_list, skb);
- queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ath10k_wmi_10x_process_rx(ar, skb);
+ else
+ ath10k_wmi_main_process_rx(ar, skb);
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ } else {
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ }
+
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.wq);
-
- skb_queue_head_init(&ar->wmi.wmi_event_list);
- INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
- /* HTC should've drained the packets already */
- if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
- ath10k_warn("there are still pending packets\n");
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
- cancel_work_sync(&ar->wmi.wmi_event_work);
- skb_queue_purge(&ar->wmi.wmi_event_list);
+ ar->wmi.num_mem_chunks = 0;
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +2016,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1234,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1264,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_channel_cmdid);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1279,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->suspend_opt = WMI_PDEV_SUSPEND;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1290,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
if (skb == NULL)
return -ENOMEM;
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value)
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn("pdev param %d not supported by firmware\n", id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1309,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
-int ath10k_wmi_cmd_init(struct ath10k *ar)
+static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
- u32 val;
+ u32 len, val;
+ int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1370,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
- buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
if (!buf)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)buf->data;
- cmd->num_host_mem_chunks = 0;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
- return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
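+/* 10.x variant of the init command: uses the 10.x resource config layout
+ * and attaches the host memory chunks requested at service ready time. */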
+static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+ int i;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(len);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ if (ar->wmi.num_mem_chunks == 0) {
+ cmd->num_host_mem_chunks = 0;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ cmd->host_mem_chunks[i].ptr =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ cmd->host_mem_chunks[i].size =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ cmd->host_mem_chunks[i].req_id =
+ __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%x\n",
+ i,
+ cmd->host_mem_chunks[i].size,
+ cmd->host_mem_chunks[i].ptr);
+ }
+out:
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+ return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ int ret;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ ret = ath10k_wmi_10x_cmd_init(ar);
+ else
+ ret = ath10k_wmi_main_cmd_init(ar);
+
+ return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
{
int len;
- len = sizeof(struct wmi_start_scan_cmd);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ len = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ len = sizeof(struct wmi_start_scan_cmd);
if (arg->ie_len) {
if (!arg->ie)
@@ -1446,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
int len = 0;
int i;
- len = ath10k_wmi_start_scan_calc_len(arg);
+ len = ath10k_wmi_start_scan_calc_len(ar, arg);
if (len < 0)
return len; /* len contains error code here */
@@ -1478,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
- off = sizeof(*cmd);
+ /* There's just one field that differs between the two start_scan
+ * structures - burst_duration, which we are not using, so there is
+ * no point in splitting the code here; just shift the buffer to fit
+ * the given FW */
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ off = sizeof(struct wmi_start_scan_cmd_10x);
+ else
+ off = sizeof(struct wmi_start_scan_cmd);
if (arg->n_channels) {
channels = (void *)skb->data + off;
@@ -1540,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
- arg->max_scan_time = 5000;
+ arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
| WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1643,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg,
- enum wmi_cmd_id cmd_id)
+ u32 cmd_id)
{
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
- cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+ if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+ cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
return -EINVAL;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return -EINVAL;
@@ -1665,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
- if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+ if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
cmdname = "start";
- else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+ else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
cmdname = "restart";
else
return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1718,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
int ath10k_wmi_vdev_start(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_START_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
- return ath10k_wmi_vdev_start_restart(ar, arg,
- WMI_VDEV_RESTART_REQUEST_CMDID);
+ u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+
+ return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1743,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1758,13 +2728,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->vdev_assoc_id = __cpu_to_le32(aid);
- memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+ memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1782,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value)
+ u32 param_id, u32 param_value)
{
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return -EOPNOTSUPP;
+ }
+
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1804,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1839,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
}
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1859,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1879,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1900,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1918,13 +2896,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+ memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1945,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
}
int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1967,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
- return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
}
int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1993,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
- return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2046,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->flags |= __cpu_to_le32(flags);
}
- return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2105,10 +3086,11 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n",
arg->vdev_id, arg->addr);
- return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
@@ -2124,7 +3106,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
- return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+ return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2155,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
- return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2171,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
cmd->stats_id = __cpu_to_le32(stats_id);
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
- return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2190,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
- return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2c5a4f8daf2e..78c991aec7f9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -208,6 +208,118 @@ struct wmi_mac_addr {
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
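+/* Per-firmware command map: translates the driver's abstract WMI command
+ * names into the numeric IDs used by a given firmware branch (main or 10.X).
+ * WMI_CMD_UNSUPPORTED marks commands a branch does not implement. */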
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+};
+
/*
* wmi command groups.
*/
@@ -247,7 +359,9 @@ enum wmi_cmd_group {
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
-/* Command IDs and commande events. */
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and command events for MAIN FW. */
enum wmi_cmd_id {
WMI_INIT_CMDID = 0x1,
@@ -488,6 +602,217 @@ enum wmi_event_id {
WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
};
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+ /* initialize the wlan sub system */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+ /* PDEV(physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+ /* VDEV(virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+ /* commands to directly control ba negotiation directly from host. */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+ /* WOW Specific WMI commands*/
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+ /* VI specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+ /*RTT related event ID*/
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
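/*
 * Illustrative sketch (not part of this patch): MAIN and 10.X firmware use
 * disjoint command ID spaces, so the host keeps a per-firmware map of u32
 * command IDs (the struct whose gpio_*_cmdid tail appears above) and rejects
 * commands the running firmware does not implement. The helper below and
 * ath10k_wmi_cmd_send() are assumptions, not defined in this header.
 */
static int wmi_cmd_send_checked(struct ath10k *ar, struct sk_buff *skb,
				u32 cmd_id)
{
	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		dev_kfree_skb_any(skb);
		return -EOPNOTSUPP;
	}

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}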
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
@@ -508,6 +833,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled */
+ };
+
+ return "<unknown>";
+}
+
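/*
 * Illustrative use of ath10k_wmi_phymode_str() (sketch only; the wrapper
 * below is hypothetical): the helper gives human-readable phy mode names
 * for debug output.
 */
static void wmi_dbg_print_phymode(enum wmi_phy_mode mode)
{
	pr_debug("ath10k: peer phymode %s (%d)\n",
		 ath10k_wmi_phymode_str(mode), mode);
}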
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
@@ -763,13 +1130,45 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
-/*
- * status consists of upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS 0x0
-#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -978,6 +1377,192 @@ struct wmi_resource_config {
__le32 max_frag_entries;
} __packed;
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior; however, TX
+ * behavior can be modified on a per-VAP basis during VAP init.
+ */
+ __le32 rx_decap_mode;
+
+ /* maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size. For a PCI target the limit can be
+ * 0 - default, 1 - 256 bytes
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+
+#define NUM_UNITS_IS_NUM_VDEVS 0x1
+#define NUM_UNITS_IS_NUM_PEERS 0x2
+
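/*
 * Sketch with assumed values (not from this patch): filling a few
 * wmi_resource_config_10x fields described above. mcast2ucast_mode follows
 * the 0/1/2 semantics documented in the structure comment; the
 * rx_decap_mode encoding used here is an assumption.
 */
static void wmi_10x_fill_resource_config(struct wmi_resource_config_10x *cfg)
{
	cfg->num_vdevs = __cpu_to_le32(8);
	cfg->num_peers = __cpu_to_le32(16);
	/* 2: convert to unicast when the group is known, else send as mcast */
	cfg->mcast2ucast_mode = __cpu_to_le32(2);
	cfg->rx_decap_mode = __cpu_to_le32(1);	/* assumed: native wifi decap */
}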
/* structure describing a host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -999,6 +1584,18 @@ struct wmi_init_cmd {
struct host_memory_chunk host_mem_chunks[1];
} __packed;
+/* _10x structure is from the 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ __le32 num_host_mem_chunks;
+
+ /*
+ * variable number of host memory chunks.
+ * This should be the last element in the structure
+ */
+ struct host_memory_chunk host_mem_chunks[1];
+} __packed;
+
/* TLV for channel list */
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1715,88 @@ struct wmi_start_scan_cmd {
*/
} __packed;
+/* This is the definition from 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+ /* Scan ID */
+ __le32 scan_id;
+
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+
+ /* VDEV id (interface) that is requesting scan */
+ __le32 vdev_id;
+
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * The scanner will rest on the BSS channel for at least min_rest_time.
+ * After min_rest_time the scanner starts checking for tx/rx activity
+ * on all VDEVs. If there is no activity the scanner switches to an
+ * off channel. If there is activity the scanner keeps the radio on
+ * the BSS channel until max_rest_time expires. At max_rest_time the
+ * scanner switches to an off channel irrespective of activity.
+ * Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+
+ /*
+ * time before sending the next set of probe requests.
+ * The scanner keeps repeating probe request transmission with the
+ * period specified by repeat_probe_time.
+ * The number of probe requests sent depends on the ssid_list
+ * and bssid_list.
+ */
+ __le32 repeat_probe_time;
+
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+
+ /*
+ * TLV (tag length value) parameters follow the scan_cmd structure.
+ * TLVs can contain a channel list, bssid list, ssid list and
+ * IEs. The TLV tags are defined above.
+ */
+} __packed;
+
+
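/*
 * Sketch (illustrative numbers only): programming the 10.X scan rest/dwell
 * times according to the comments above: the scanner stays on the BSS
 * channel at least min_rest_time and at most max_rest_time before going
 * off channel.
 */
static void wmi_10x_fill_scan_times(struct wmi_start_scan_cmd_10x *cmd)
{
	cmd->dwell_time_active = __cpu_to_le32(50);	/* msec on active channels */
	cmd->dwell_time_passive = __cpu_to_le32(150);	/* msec on passive channels */
	cmd->min_rest_time = __cpu_to_le32(50);
	cmd->max_rest_time = __cpu_to_le32(500);
	cmd->idle_time = __cpu_to_le32(25);	/* inactivity threshold on BSS chan */
}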
struct wmi_ssid_arg {
int len;
const u8 *ssid;
@@ -1268,7 +1947,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
@@ -1277,8 +1956,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
-struct wmi_mgmt_rx_event {
- struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
@@ -1465,6 +2154,60 @@ struct wmi_csa_event {
#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 arpdhcp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
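/*
 * Sketch of the indirection this map enables (the map pointer handling is
 * assumed, not shown in this header): callers pick the firmware-specific ID
 * from wmi_pdev_param_map and pass a raw u32 to ath10k_wmi_pdev_set_param(),
 * skipping parameters the running firmware does not support.
 */
static int wmi_set_rx_chainmask(struct ath10k *ar,
				const struct wmi_pdev_param_map *map,
				u32 chainmask)
{
	if (map->rx_chain_mask == WMI_PDEV_PARAM_UNSUPPORTED)
		return -EOPNOTSUPP;

	return ath10k_wmi_pdev_set_param(ar, map->rx_chain_mask, chainmask);
}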
enum wmi_pdev_param {
/* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1564,6 +2307,97 @@ enum wmi_pdev_param {
WMI_PDEV_PARAM_POWER_GATING_SLEEP,
};
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmit mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off off-channel mode. 1: turn on off-channel mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable fast channel reset */
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set Bursting DUR */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set Bursting Enable */
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
+
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
@@ -2088,6 +2922,61 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
/* the definition of different VDEV parameters */
enum wmi_vdev_param {
/* RTS Threshold */
@@ -2219,6 +3108,121 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_TX_ENCAP_TYPE,
};
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enable/disable */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP starts considering the STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
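/*
 * Sketch (illustrative timeouts, hypothetical helper): applying the AP
 * keepalive behaviour described above through wmi_vdev_param_map and the
 * u32-based ath10k_wmi_vdev_set_param() declared later in this header.
 */
static int wmi_set_ap_keepalive(struct ath10k *ar, u32 vdev_id,
				const struct wmi_vdev_param_map *map)
{
	int ret;

	ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_min_idle_inactive_time_secs, 50);
	if (ret)
		return ret;

	ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_max_idle_inactive_time_secs, 100);
	if (ret)
		return ret;

	return ath10k_wmi_vdev_set_param(ar, vdev_id,
			map->ap_keepalive_max_unresponsive_time_secs, 120);
}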
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@@ -3000,7 +4004,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
@@ -3013,7 +4016,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3022,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
- u32 value);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
const u8 *bssid);
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
- enum wmi_vdev_param param_id, u32 param_value);
+ u32 param_id, u32 param_value);
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4067,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+ const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e9bc9e616b69..79bffe165cab 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
-
-
- bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/*Initialization*/
static int ath_ahb_probe(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
- struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 48161edec8de..69f58b073e85 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
ah->stats.tx_bytes_count += skb->len;
info = IEEE80211_SKB_CB(skb);
+ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+ memcpy(info->status.rates, bf->rates, size);
+
tries[0] = info->status.rates[0].count;
tries[1] = info->status.rates[1].count;
tries[2] = info->status.rates[2].count;
ieee80211_tx_info_clear_status(info);
- size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
- memcpy(info->status.rates, bf->rates, size);
-
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 98a886154d9c..05debf700a84 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,8 +22,7 @@
#define ATH6KL_MAX_IE 256
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 74369de00fb5..ca9ba005f287 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index a2c8ff809793..14cab1403dd6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -60,7 +60,7 @@
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
-#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7944c25c9a43..32f139e2e897 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,6 +84,26 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+ Enabling this will only enable TX99 support, all other modes of
+ operation will be disabled.
+
+ TX99 support enables Specific Absorption Rate (SAR) testing.
+ SAR is the unit of measurement for the amount of radio frequency (RF)
+ energy absorbed by the body when using a wireless device. The RF
+ exposure limits used are expressed in terms of SAR, which is a measure
+ of the electric and magnetic field strength and power density for
+ transmitters operating at frequencies from 300 kHz to 100 GHz.
+ Regulatory bodies around the world require that wireless devices
+ be evaluated to meet the RF exposure limits set forth in
+ governmental SAR regulations.
+
config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 75ee9e7704ce..6205ef5a9321 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -14,9 +14,7 @@ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
- dfs.o \
- dfs_pattern_detector.o \
- dfs_pri_detector.o
+ dfs.o
ath9k-$(CONFIG_PM_SLEEP) += wow.o
obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 072e4b531067..2dff2765769b 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
- pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index be466b0ef7a7..d28923b7435b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
ath_dbg(common, ANI,
- "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
* restore historical levels for this channel
*/
ath_dbg(common, ANI,
- "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
- chan->channelFlags,
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index dd1cc73d7946..bd048cc69a33 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x39;
+ ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x1;
- }
+ ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x4;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x6;
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
- if ((antcomb->scan == 0) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
- ant_conf->fast_div_bias = 0x3f;
- } else {
- ant_conf->fast_div_bias = 0x6;
- }
+ ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 08656473c63e..ff415e863ee9 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+ val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_FC_DYN2040_EN;
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_FC_DYN2040_PRI_CH;
}
REG_WRITE(ah, AR_PHY_TURBO, phymode);
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
ENABLE_REGWRITE_BUFFER(ah);
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
int i, regWrites = 0;
u32 modesIndex, freqIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
+ if (IS_CHAN_5GHZ(chan)) {
freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ } else {
freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
}
/*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (!AR_SREV_9280_20_OR_LATER(ah))
rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 9f589744a9f9..cdc74005650c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
bool supported = false;
switch (ah->supp_cals & cal_type) {
case IQ_MISMATCH_CAL:
- /* Run IQ Mismatch for non-CCK only */
- if (!IS_CHAN_B(chan))
- supported = true;
+ supported = true;
break;
case ADC_GAIN_CAL:
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
- if (!IS_CHAN_B(chan) &&
- !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
supported = true;
break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
- nfcal_pending = ah->caldata->nfcal_pending;
+ nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index fb61b081d172..5c95fd9e9c9e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
u32 modesIndex;
int i;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
ENABLE_REGWRITE_BUFFER(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 1fc1fa955d44..f087117b2e6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -679,6 +680,26 @@ static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -700,6 +721,8 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9002_hw_tx99_start;
+ ops->tx99_stop = ar9002_hw_tx99_stop;
ar9002_hw_set_nf_limits(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6988e1d081f2..22934d3ca544 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
- if (caldata)
- caldata->done_txiqcal_once = is_reusable;
+ if (caldata) {
+ if (is_reusable)
+ set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ else
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ }
return;
}
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
}
static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+ return;
+
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
}
+
+ if (caldata)
+ set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+ if (IS_CHAN_2GHZ(chan)) {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ } else {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ }
+ }
}
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
+ if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
caldata->tx_clcal[i][j] =
REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
}
- caldata->done_txclcal_once = true;
+ set_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
}
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
ar9003_hw_rtt_clear_hist(ah);
}
- if (rtt && !run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
- agc_supp_cals &= agc_ctrl;
- agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
- AR_PHY_AGC_CONTROL_FLTR_CAL |
- AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ if (rtt) {
+ if (!run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ } else {
+ if (ah->ah_flags & AH_FASTCC)
+ run_agc_cal = true;
+ }
}
if (ah->enabled_cals & TX_CL_CAL) {
- if (caldata && caldata->done_txclcal_once)
+ if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* AGC calibration
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (caldata && !caldata->done_txiqcal_once)
+ if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- } else if (caldata && !caldata->done_txiqcal_once) {
+ } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
run_agc_cal = true;
sep_iq_cal = true;
}
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+ /* Disable BB_active */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- ar9003_hw_do_manual_peak_cal(ah, chan);
+ ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+ udelay(5);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
- else if (caldata && caldata->done_txiqcal_once)
+ else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
- if (!ath9k_hw_rfbus_req(ah))
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah),
"Could not stop baseband\n");
- else
+ } else {
ar9003_hw_rtt_fill_hist(ah);
+ if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+ ar9003_hw_rtt_load_hist(ah);
+ }
+
ath9k_hw_rfbus_done(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f4864807e15b..1ec52356b5a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
- return eep->base_ext1.ant_div_control;
+ if (AR_SREV_9565(ah))
+ return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+ else
+ return eep->base_ext1.ant_div_control;
case EEP_ANTENNA_GAIN_5G:
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct ar9300_base_eep_hdr *pBase;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- ah->eeprom.ar9300_eep.macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
out:
if (len > size)
len = size;
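/*
 * Why scnprintf here (illustrative helper, not from this patch): snprintf()
 * returns the length that would have been written, so accumulating it into
 * "len" can exceed "size" and make the next "size - len" wrap. scnprintf()
 * returns the number of characters actually stored, keeping the running
 * length bounded by the buffer size.
 */
static int dump_two_headers(char *buf, int size)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header");
	len += scnprintf(buf + len, size - len, "%20s :\n", "5GHz modal Header");

	return len;
}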
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9565(ah)) {
if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+ /* Force WLAN LNA diversity ON */
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+ /* Force WLAN LNA diversity OFF */
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
}
}
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
- if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ && common->bt_ant_diversity)
regval |= AR_FAST_DIV_ENABLE;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 75d4fb41962f..0e5daa58a4fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 608bb4824e2a..b07f164d65cf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8dd069259e7b..7b94a6c7db3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
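
The TXIQCAL_DONE/TXCLCAL_DONE/RTT_DONE names above come from folding the old per-calibration booleans in the caldata structure into a single cal_flags bitmap handled with the generic bitops. A minimal sketch of that pattern, with the enum and struct layout assumed for illustration only:

/* Illustration of the bool -> bitmap conversion; these definitions are
 * stand-ins, not the driver's.
 */
enum example_cal_flags {
        EX_TXIQCAL_DONE,
        EX_TXCLCAL_DONE,
        EX_RTT_DONE,
};

struct example_caldata {
        unsigned long cal_flags;        /* one bit per former bool */
};

static void example_end_reset(struct example_caldata *caldata)
{
        clear_bit(EX_TXIQCAL_DONE, &caldata->cal_flags);
        clear_bit(EX_TXCLCAL_DONE, &caldata->cal_flags);
        clear_bit(EX_RTT_DONE, &caldata->cal_flags);
}

Later code can then gate on test_bit(EX_RTT_DONE, &caldata->cal_flags) instead of a dedicated rtt_done field, as the ar9003_rtt.c hunks further down do.
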
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e897648d3233..11f53589a3f3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_GC_DYN2040_EN;
/* Configure control (primary) channel at +-10MHz */
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
+ if (IS_CHAN_HT40PLUS(chan))
phymode |= AR_PHY_GC_DYN2040_PRI_CH;
}
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
/* Configure MAC for 20/40 operation */
- ath9k_hw_set11nmac2040(ah);
+ ath9k_hw_set11nmac2040(ah, chan);
/* global transmit timeout (25 TUs default)*/
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
- REG_WRITE(ah, AR_PCU_MISC_MODE2,
- val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
- REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ val |= AR_AGG_WEP_ENABLE_FIX |
+ AR_AGG_WEP_ENABLE |
+ AR_PCU_MISC_MODE2_CFP_IGNORE;
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,41 +681,22 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
{
int ret;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- if (chan->channel <= 5350)
- ret = 1;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 3;
- else
- ret = 5;
- break;
-
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- if (chan->channel <= 5350)
- ret = 2;
- else if ((chan->channel > 5350) && (chan->channel <= 5600))
- ret = 4;
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 7;
else
- ret = 6;
- break;
-
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- ret = 8;
- break;
+ return 8;
+ }
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- ret = 7;
- break;
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
- default:
- ret = -EINVAL;
- }
+ if (IS_CHAN_HT40(chan))
+ ret++;
return ret;
}
@@ -728,28 +707,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
unsigned int regWrites = 0, i;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
/*
* SOC, MAC, BB, RADIO initvals.
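
The switch on chan->chanmode collapses to two predicates because modesIndex only selects a column of the five-column initval tables used throughout this patch (Addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20): index 1/2 are the 5 GHz HT20/HT40 columns and 4/3 the 2 GHz ones. A standalone restatement of the mapping, for illustration only:

/* Equivalent helper form of the modesIndex selection above.
 *   1 -> 5G_HT20   2 -> 5G_HT40   3 -> 2G_HT40   4 -> 2G_HT20
 */
static u32 example_modes_index(struct ath9k_channel *chan)
{
        if (IS_CHAN_5GHZ(chan))
                return IS_CHAN_HT40(chan) ? 2 : 1;

        return IS_CHAN_HT40(chan) ? 3 : 4;
}
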
@@ -847,8 +808,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (chan == NULL)
return;
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
- chan->channel,
- chan->channelFlags);
+ chan->channel);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else if (AR_SREV_9565(ah)) {
- antconf->lna1_lna2_delta = -3;
+ antconf->lna1_lna2_switch_delta = 3;
+ antconf->lna1_lna2_delta = -9;
antconf->div_group = 3;
} else {
+ antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@@ -1489,17 +1455,24 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
} else if (AR_SREV_9565(ah)) {
if (enable) {
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
@@ -1526,28 +1499,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
unsigned int regWrites = 0;
u32 modesIndex;
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- break;
-
- default:
- return -EINVAL;
- }
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
if (modesIndex == ah->modes_index) {
*ini_reloaded = false;
@@ -1662,6 +1617,98 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
}
}
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+ static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ unsigned int i;
+
+ if (txpower <= MAX_RATE_POWER) {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
+ } else {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = MAX_RATE_POWER;
+ }
+
+ REG_WRITE(ah, 0xa458, 0);
+
+ REG_WRITE(ah, 0xa3c0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3cc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3d0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+ REG_WRITE(ah, 0xa3d4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
+ REG_WRITE(ah, 0xa3e4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
+ REG_WRITE(ah, 0xa3e8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
+ REG_WRITE(ah, 0xa3d8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+ REG_WRITE(ah, 0xa3dc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
+ REG_WRITE(ah, 0xa3ec,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1701,6 +1748,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
#endif
+ ops->tx99_start = ar9003_hw_tx99_start;
+ ops->tx99_stop = ar9003_hw_tx99_stop;
+ ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
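
The new ar9003_hw_tx99_set_txpower() above packs up to four per-rate-group power values into each 32-bit target-power register, one byte lane per group (shifts 0/8/16/24). A rough sketch of the packing, assuming ATH9K_POW_SM() is a plain mask-and-shift macro (its exact mask is not visible in this diff):

/* Assumed shape of the packing; the real ATH9K_POW_SM() may mask to
 * fewer bits.
 */
#define EX_POW_SM(_pwr, _shift)         (((_pwr) & 0xff) << (_shift))

static u32 example_pack_powers(const s16 *pwr)
{
        /* four rate-group entries share one register */
        return EX_POW_SM(pwr[3], 24) |
               EX_POW_SM(pwr[2], 16) |
               EX_POW_SM(pwr[1], 8) |
               EX_POW_SM(pwr[0], 0);
}
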
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6fd752321e36..fca624322dc8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -343,8 +343,12 @@
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 74de3539c2c8..934418872e8e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
}
}
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+ int agc, caldac;
+
+ if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ return;
+
+ if ((index != 5) || (chain >= 2))
+ return;
+
+ agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+ if (!agc)
+ return;
+
+ caldac = ah->caldata->caldac[chain];
+ ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+ caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+ ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
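
The caldac rewrite in ar9003_hw_patch_rtt() is pure bit shuffling: the mask 0xFFFF05FF clears bits 15:11 and bit 9 of the RTT entry, which are exactly the positions the re-packed caldac can occupy once it is shifted left by 4. What those hardware fields mean is not visible in this diff; the sketch below only restates the arithmetic with a worked value:

/* Bit-level restatement of the patch above.
 * caldac = 0x2a: (0x2a & 0x20) | ((0x2a & 0x1f) << 7) = 0x520,
 * shifted left by 4 -> 0x5200 (bits 14, 12 and 9 set).
 */
static u32 example_patch_entry(u32 entry, u8 caldac)
{
        u32 packed = (caldac & 0x20) | ((caldac & 0x1f) << 7);

        entry &= 0xFFFF05FF;            /* clear bits 15:11 and bit 9 */
        entry |= packed << 4;           /* caldac bit 5 -> bit 9,
                                         * caldac bits 4:0 -> bits 15:11 */
        return entry;
}
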
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
{
u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+ ar9003_hw_patch_rtt(ah, i, chain);
+
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"RTT value at idx %d, chain %d is: 0x%x\n",
i, chain, ah->caldata->rtt_table[chain][i]);
}
}
- ah->caldata->rtt_done = true;
+ set_bit(RTT_DONE, &ah->caldata->cal_flags);
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
}
if (ah->caldata)
- ah->caldata->rtt_done = false;
+ clear_bit(RTT_DONE, &ah->caldata->cal_flags);
}
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ah->caldata)
return false;
- if (!ah->caldata->rtt_done)
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ }
+
+ if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
return false;
ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ ar9003_hw_rtt_set_mask(ah, 0x30);
+ else
+ ar9003_hw_rtt_set_mask(ah, 0x10);
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 88ff1d7b53ab..6f899c692647 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,7 +20,17 @@
/* AR9485 1.1 */
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
/* Addr allmodes */
@@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
{0x00009e00, 0x037216a0},
{0x00009e04, 0x00182020},
{0x00009e18, 0x00000000},
+ {0x00009e20, 0x000003a8},
{0x00009e2c, 0x00004121},
{0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +373,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +553,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +647,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
-
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+ {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x00009e00, 0x03721b20},
{0x00009e04, 0x00082020},
{0x00009e18, 0x0300501e},
+ {0x00009e20, 0x000003ba},
{0x00009e2c, 0x00002e21},
{0x00009e44, 0x02182324},
{0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index e85a8b076c22..a8c757b6124f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0xaaaaaa6e},
+ {0x0000a3ac, 0x3c466455},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a404, 0x00000000},
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
+ {0x0000a414, 0x1ce739c5},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
+ {0x0000a41c, 0x1ce739c5},
{0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
+ {0x0000a424, 0x1ce739c5},
{0x0000a428, 0x000001ce},
{0x0000a42c, 0x1ce739ce},
{0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
{0x00004050, 0x00300300},
{0x0000406c, 0x00100000},
+ {0x00009e20, 0x000003b6},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 2ee35f677c0e..e7a38d844a6a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -64,7 +64,6 @@ struct ath_node;
struct ath_config {
u16 txpowlimit;
- u8 cabqReadytime;
};
/*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
u8 baw_tracked : 1;
};
+struct ath_rxbuf {
+ struct list_head list;
+ struct sk_buff *bf_mpdu;
+ void *bf_desc;
+ dma_addr_t bf_daddr;
+ dma_addr_t bf_buf_addr;
+};
+
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
- struct ath_buf *buf_hold;
+ struct ath_rxbuf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
#define ATH_DUMP_BTCOEX(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, \
- "%20s : %10d\n", _s, (_val)); \
+ len += scnprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
} while (0)
enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR 0x0020
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_CUS252 0x0008
+#define ATH9K_PCI_WOW 0x0010
+#define ATH9K_PCI_BT_ANT_DIV 0x0020
+#define ATH9K_PCI_D3_L1_WAR 0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
/*
* Default cache line size, in bytes.
@@ -769,6 +778,11 @@ struct ath_softc {
enum spectral_mode spectral_mode;
struct ath_spec_scan spec_config;
+ struct ieee80211_vif *tx99_vif;
+ struct sk_buff *tx99_skb;
+ bool tx99_state;
+ s16 tx99_power;
+
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -877,6 +891,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
*/
enum ath_fft_sample_type {
ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
};
struct fft_sample_tlv {
@@ -903,6 +918,39 @@ struct fft_sample_ht20 {
u8 data[SPECTRAL_HT20_NUM_BINS];
} __packed;
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
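+
+/* A minimal sketch of filling a few fields of the new HT20/40 sample:
+ * the multi-byte members are declared __be16/__be64, so they must be
+ * stored with the byte-order helpers to keep the record's layout fixed
+ * for whatever consumes it (the export path is not shown in this hunk
+ * and is assumed here).
+ *
+ *     static void example_fill_ht20_40(struct fft_sample_ht20_40 *fft,
+ *                                      u16 freq, u64 tsf,
+ *                                      s8 lo_rssi, s8 up_rssi)
+ *     {
+ *             fft->freq = cpu_to_be16(freq);
+ *             fft->tsf = cpu_to_be64(tsf);
+ *             fft->lower_rssi = lo_rssi;
+ *             fft->upper_rssi = up_rssi;
+ *     }
+ */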
+
+int ath9k_tx99_init(struct ath_softc *sc);
+void ath9k_tx99_deinit(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+
void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
@@ -924,7 +972,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-bool ath9k_uses_beacons(int type);
void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
enum spectral_mode spectral_mode);
@@ -952,7 +999,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ath9k_vif_iter_data *iter_data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b5c16b3a37b9..17be35392bb4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
+ ath9k_hw_check_nav(ah);
+
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 5e8219a91e25..278365b8a895 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -63,13 +63,13 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf)
{
s8 noise = ATH_DEFAULT_NOISE_FLOOR;
- if (chan && chan->noisefloor) {
- s8 delta = chan->noisefloor -
- ATH9K_NF_CAL_NOISE_THRESH -
+ if (nf) {
+ s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
ath9k_hw_get_default_nf(ah, chan);
if (delta > 0)
noise += delta;
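
With the extra nf argument, ath9k_hw_getchan_noise() now works purely from the value the caller measured: the estimate starts at the default noise floor and is bumped up only by the amount the measured NF exceeds the per-channel nominal value plus the calibration threshold. A worked sketch with made-up constants (the real ATH_DEFAULT_NOISE_FLOOR and ATH9K_NF_CAL_NOISE_THRESH values are not shown in this hunk):

/* Worked example, illustrative constants only.
 * nominal NF = -118 dBm, threshold = 2 dB, measured nf = -110 dBm:
 *   delta = -110 - 2 - (-118) = 6  ->  noise = -95 + 6 = -89 dBm
 * A non-positive delta leaves the default -95 dBm untouched.
 */
static s16 example_chan_noise(s16 nf, s16 nominal_nf)
{
        s16 noise = -95;                        /* default noise floor */
        s16 delta = nf - 2 - nominal_nf;        /* 2 dB threshold */

        if (nf && delta > 0)
                noise += delta;

        return noise;
}
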
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
- (cal->nfcal_interference ?
+ (test_bit(NFCAL_INTF, &cal->cal_flags) ?
"not corrected (due to interference)" :
"correcting to MAX"));
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* we bypass this limit here in order to better deal
* with our environment.
*/
- if (!cal->nfcal_interference)
+ if (!test_bit(NFCAL_INTF, &cal->cal_flags))
h[i].privNF = limit->max;
}
}
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* Re-enable the enforcement of the NF maximum again.
*/
if (!high_nf_mid)
- cal->nfcal_interference = false;
+ clear_bit(NFCAL_INTF, &cal->cal_flags);
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, conf->chandef.chan->center_freq);
+ currCal->calData->calType, ah->curchan->chan->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
if (ah->caldata)
- ah->caldata->nfcal_pending = true;
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
int32_t val;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (chainmask & (1 << i)) {
s16 nfval;
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < NUM_NF_READINGS; i++) {
if (chainmask & (1 << i)) {
- if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,10 +389,10 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
h = caldata->nfCalHist;
- caldata->nfcal_pending = false;
+ clear_bit(NFCAL_PENDING, &caldata->cal_flags);
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
return true;
}
EXPORT_SYMBOL(ath9k_hw_getnf);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
- ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
* the baseband update the internal NF value itself, similar to
* what is being done after a full reset.
*/
- if (!caldata->nfcal_pending)
+ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
- caldata->nfcal_interference = true;
+ set_bit(NFCAL_INTF, &caldata->cal_flags);
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3d70b8c2bcdd..b8ed95e9a335 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -116,7 +116,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
void ath9k_hw_reset_calibration(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf);
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index d3063c21e16c..a7e5a05b2eff 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
- u32 chanmode = 0;
-
- switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_G_HT40PLUS;
- else
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chanmode = CHANNEL_A_HT40PLUS;
- else
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
/*
* Update internal channel flags.
*/
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chandef->chan->center_freq;
- ichan->chan = chandef->chan;
-
- if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
+ struct ieee80211_channel *chan = chandef->chan;
+ u16 flags = 0;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ flags |= CHANNEL_5GHZ;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
- ichan->channelFlags |= CHANNEL_QUARTER;
+ flags |= CHANNEL_QUARTER;
break;
case NL80211_CHAN_WIDTH_10:
- ichan->channelFlags |= CHANNEL_HALF;
+ flags |= CHANNEL_HALF;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
+ flags |= CHANNEL_HT;
+ break;
case NL80211_CHAN_WIDTH_40:
- ichan->chanmode = ath9k_get_extchanmode(chandef);
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+ else
+ flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
break;
default:
WARN_ON(1);
}
+
+ ichan->channelFlags = flags;
}
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
/*
* Get the internal channel reference.
*/
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef)
{
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ struct ieee80211_channel *curchan = chandef->chan;
struct ath9k_channel *channel;
u8 chan_idx;
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+ ath9k_cmn_update_ichannel(channel, chandef);
return channel;
}
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
int ath9k_cmn_count_streams(unsigned int chainmask, int max)
{
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e039bcbfbd79..eb85e1bdca88 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -43,10 +43,9 @@
(((x) + ((mul)/2)) / (mul))
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
- struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c088744a6bfb..83a2c59f680b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
return -ENOMEM;
if (common->disable_ani) {
- len += snprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%s: %s\n",
+ "ANI", "DISABLED");
goto exit;
}
- len += snprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += snprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %s\n",
+ "ANI", "ENABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "ANI RESET", ah->stats.ast_ani_reset);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR UP", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "SPUR DOWN", ah->stats.ast_ani_spurup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP UP", ah->stats.ast_ani_stepup);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ "CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
return -ENOMEM;
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
- len += snprintf(buf + len, size - len, "%s\n",
- "Antenna Diversity Combining is disabled");
+ len += scnprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
goto exit;
}
ath9k_ps_wakeup(sc);
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
- len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
- lna_conf_str[div_ant_conf.main_lna_conf]);
- len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
- lna_conf_str[div_ant_conf.alt_lna_conf]);
- len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
- as_main->rssi_avg);
- len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
- as_alt->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
- len += snprintf(buf + len, size - len, "-------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "TOTAL COUNT",
- as_main->recv_cnt,
- as_alt->recv_cnt);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
-
- len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
- len += snprintf(buf + len, size - len, "--------------------\n");
-
- len += snprintf(buf + len, size - len, "%30s%15s\n",
- "MAIN", "ALT");
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 + LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
- len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
- "LNA1 - LNA2",
- as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
- as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += scnprintf(buf + len, size - len, "-------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += scnprintf(buf + len, size - len, "--------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
exit:
if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Raw DMA Debug values:\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
- i, val[i]);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+ i, val[i]);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "%2d %2x %1x %2x %2x\n",
- i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
- val[2] & (0x7 << (i * 3)) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ val[2] & (0x7 << (i * 3)) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, DMA_BUF_LEN - len,
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
- REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+ REG_READ_D(ah, AR_OBS_BUS_1));
+ len += scnprintf(buf + len, DMA_BUF_LEN - len,
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
#define PR_IS(a, s) \
do { \
- len += snprintf(buf + len, mxlen - len, \
- "%21s: %10u\n", a, \
- sc->debug.stats.istats.s); \
+ len += scnprintf(buf + len, mxlen - len, \
+ "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
- len += snprintf(buf + len, mxlen - len,
- "SYNC_CAUSE stats:\n");
+ len += scnprintf(buf + len, mxlen - len,
+ "SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
+ len += scnprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += scnprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += scnprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += scnprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
ath_txq_unlock(sc, txq);
return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
len += print_queue(sc, txq, buf + len, size - len);
}
- len += snprintf(buf + len, size - len, "(CAB): ");
+ len += scnprintf(buf + len, size - len, "(CAB): ");
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int reg;
u32 rxfilter;
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID: %pM\n", common->curbssid);
- len += snprintf(buf + len, sizeof(buf) - len,
- "BSSID-MASK: %pM\n", common->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len,
- "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID: %pM\n", common->curbssid);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "BSSID-MASK: %pM\n", common->bssidmask);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += snprintf(buf + len, sizeof(buf) - len,
- "RXFILTER: 0x%x", rxfilter);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
- len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
- len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
- len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+ len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
- len += snprintf(buf + len, sizeof(buf) - len, " CST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
- len += snprintf(buf + len, sizeof(buf) - len, " RX");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
ath9k_calculate_iter_data(hw, NULL, &iter_data);
- len += snprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
- " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
- iter_data.naps, iter_data.nstations, iter_data.nmeshes,
- iter_data.nwds, iter_data.nadhocs,
- sc->nvifs, sc->nbcnvifs);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs,
+ sc->nvifs, sc->nbcnvifs);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Hang",
+ sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Baseband Watchdog",
+ sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Fatal HW Error",
+ sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX HW error",
+ sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "TX Path Hang",
+ sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "PLL RX Hang",
+ sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MCI Reset",
+ sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
#define RXS_ERR(s, e) \
do { \
- len += snprintf(buf + len, size - len, \
- "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.e); \
+ len += scnprintf(buf + len, size - len, \
+ "%22s : %10u\n", s, \
+ sc->debug.stats.rxstats.e);\
} while (0)
struct ath_softc *sc = file->private_data;
@@ -1048,6 +1050,9 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
char buf[32];
ssize_t len;
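+ /* Spectral scan control is not available when TX99 is compiled in. */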
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
@@ -1439,22 +1444,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "Channel Noise Floor : %d\n", ah->noise);
- len += snprintf(buf + len, size - len,
- "Chain | privNF | # Readings | NF Readings\n");
+ len += scnprintf(buf + len, size - len,
+ "Channel Noise Floor : %d\n", ah->noise);
+ len += scnprintf(buf + len, size - len,
+ "Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
- len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
- i, h[i].privNF, nread);
+ len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+ i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
- len += snprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " %d", h[i].nfCalBuffer[j]);
+ len += scnprintf(buf + len, size - len, "\n");
}
if (len > size)
@@ -1543,8 +1548,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!sc->sc_ah->common.btcoex_enabled) {
- len = snprintf(buf, size, "%s\n",
- "BTCOEX is disabled");
+ len = scnprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
goto exit;
}
@@ -1582,43 +1587,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!an->sta->ht_cap.ht_supported) {
- len = snprintf(buf, size, "%s\n",
- "HT not supported");
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
goto exit;
}
- len = snprintf(buf, size, "Max-AMPDU: %d\n",
- an->maxampdu);
- len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
- an->mpdudensity);
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
- len += snprintf(buf + len, size - len,
- "%2s%7s\n", "AC", "SCHED");
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%2d%7d\n",
- acno, ac->sched);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
ath_txq_unlock(sc, txq);
}
- len += snprintf(buf + len, size - len,
- "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
- "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
- "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
ath_txq_lock(sc, txq);
- len += snprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
- tid->tidno, tid->seq_start, tid->seq_next,
- tid->baw_size, tid->baw_head, tid->baw_tail,
- tid->bar_index, tid->sched, tid->paused);
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno, tid->seq_start, tid->seq_next,
+ tid->baw_size, tid->baw_head, tid->baw_tail,
+ tid->bar_index, tid->sched, tid->paused);
ath_txq_unlock(sc, txq);
}
exit:
@@ -1773,6 +1778,111 @@ void ath9k_deinit_debug(struct ath_softc *sc)
}
}
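+/* debugfs "tx99": report whether TX99 continuous-transmit mode is active. */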
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
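+ /* TX99 can only be used while a single interface is up. */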
+ if (sc->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
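+ /* Writing "1" while already running restarts TX99; "0" while stopped is a no-op. */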
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
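+/* debugfs "tx99_power": TX99 transmit power, stored in 0.5 dBm steps. */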
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
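+ /* The value is in 0.5 dBm steps; anything above MAX_RATE_POWER is rejected. */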
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1864,5 +1974,15 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
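+ /* TX99 entries only exist on AR9300 2.0+ hardware with CONFIG_ATH9K_TX99 set. */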
+ if (config_enabled(CONFIG_ATH9K_TX99) &&
+ AR_SREV_9300_20_OR_LATER(ah)) {
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 6e1556fa2f3e..d6e3fa4299a4 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -193,12 +193,12 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
- TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ len += scnprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index 3c839f06a06a..c6fa3d5b5d74 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,7 +17,7 @@
#ifndef ATH9K_DFS_H
#define ATH9K_DFS_H
-#include "dfs_pattern_detector.h"
+#include "../dfs_pattern_detector.h"
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
/**
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 3c6e4138a95d..90b8342d1ed4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -20,16 +20,16 @@
#include "ath9k.h"
#include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
-
-struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
#define ATH9K_DFS_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- sc->debug.stats.dfs_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
#define ATH9K_DFS_POOL_STAT(s, p) \
- len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
- global_dfs_pool_stats.p);
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ dfs_pool_stats.p);
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -44,12 +44,15 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len, "DFS support for "
- "macVersion = 0x%x, macRev = 0x%x: %s\n",
- hw_ver->macVersion, hw_ver->macRev,
- (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
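+ /* Snapshot the pool counters from the shared pattern detector. */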
+ if (sc->dfs_detector)
+ dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
- len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
ATH9K_DFS_STAT("pulse events reported ", pulses_total);
ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@@ -59,11 +62,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
- len += snprintf(buf + len, size - len, "Radar detector statistics "
- "(current DFS region: %d)\n", sc->dfs_detector->region);
+ len += scnprintf(buf + len, size - len, "Radar detector statistics "
+ "(current DFS region: %d)\n",
+ sc->dfs_detector->region);
ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
ATH9K_DFS_STAT("Radars detected ", radar_detected);
- len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index e36810a4b585..0a7ddf4c88c9 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -51,25 +51,11 @@ struct ath_dfs_stats {
u32 radar_detected;
};
-/**
- * struct ath_dfs_pool_stats - DFS Statistics for global pools
- */
-struct ath_dfs_pool_stats {
- u32 pool_reference;
- u32 pulse_allocated;
- u32 pulse_alloc_error;
- u32 pulse_used;
- u32 pseq_allocated;
- u32 pseq_alloc_error;
- u32 pseq_used;
-};
#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
void ath9k_dfs_init_debug(struct ath_softc *sc);
-#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
-#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#else
@@ -77,8 +63,6 @@ extern struct ath_dfs_pool_stats global_dfs_pool_stats;
#define DFS_STAT_INC(sc, c) do { } while (0)
static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
-#define DFS_POOL_STAT_INC(c) do { } while (0)
-#define DFS_POOL_STAT_DEC(c) do { } while (0)
#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9ea8e4b779c9..b4091716e9b3 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_dump_4k_modal_eeprom(buf, len, size,
- &eep->modalHeader);
+ &eep->modalHeader);
goto out;
}
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 3ae1f3df0637..e1d0c217c104 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 1c25368b3836..39107e31e79a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
- len += snprintf(buf + len, size - len,
- "%20s :\n", "2GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
- len += snprintf(buf + len, size - len,
- "%20s :\n", "5GHz modal Header");
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
out:
if (len > size)
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4b412aaf4f36..c34f21241da9 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
- len += snprintf(buf + len, size - len, "BT Weights: ");
+ len += scnprintf(buf + len, size - len, "BT Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->bt_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "WLAN Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
- len += snprintf(buf + len, size - len, "%08x ",
- btcoex_hw->wlan_weight[i]);
- len += snprintf(buf + len, size - len, "\n");
- len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "Tx Priorities: ");
for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
- len += snprintf(buf + len, size - len, "%08x ",
+ len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->tx_prio[i]);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index c1b45e2f8481..fb071ee4fcfb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RX",
- be32_to_cpu(cmd_rsp.rx));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RX",
+ be32_to_cpu(cmd_rsp.rx));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXORN",
- be32_to_cpu(cmd_rsp.rxorn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXORN",
+ be32_to_cpu(cmd_rsp.rxorn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "RXEOL",
- be32_to_cpu(cmd_rsp.rxeol));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXEOL",
+ be32_to_cpu(cmd_rsp.rxeol));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXURN",
- be32_to_cpu(cmd_rsp.txurn));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXURN",
+ be32_to_cpu(cmd_rsp.txurn));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TXTO",
- be32_to_cpu(cmd_rsp.txto));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXTO",
+ be32_to_cpu(cmd_rsp.txto));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CST",
- be32_to_cpu(cmd_rsp.cst));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CST",
+ be32_to_cpu(cmd_rsp.cst));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Xretries",
- be32_to_cpu(cmd_rsp.xretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Xretries",
+ be32_to_cpu(cmd_rsp.xretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "FifoErr",
- be32_to_cpu(cmd_rsp.fifoerr));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "FifoErr",
+ be32_to_cpu(cmd_rsp.fifoerr));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Filtered",
- be32_to_cpu(cmd_rsp.filtered));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Filtered",
+ be32_to_cpu(cmd_rsp.filtered));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "TimerExp",
- be32_to_cpu(cmd_rsp.timer_exp));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TimerExp",
+ be32_to_cpu(cmd_rsp.timer_exp));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "ShortRetries",
- be32_to_cpu(cmd_rsp.shortretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "ShortRetries",
+ be32_to_cpu(cmd_rsp.shortretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "LongRetries",
- be32_to_cpu(cmd_rsp.longretries));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "LongRetries",
+ be32_to_cpu(cmd_rsp.longretries));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "QueueNull",
- be32_to_cpu(cmd_rsp.qnull));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "QueueNull",
+ be32_to_cpu(cmd_rsp.qnull));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "EncapFail",
- be32_to_cpu(cmd_rsp.encap_fail));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "EncapFail",
+ be32_to_cpu(cmd_rsp.encap_fail));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "NoBuf",
- be32_to_cpu(cmd_rsp.nobuf));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostSend",
- be32_to_cpu(cmd_rsp.host_send));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostSend",
+ be32_to_cpu(cmd_rsp.host_send));
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "HostDone",
- be32_to_cpu(cmd_rsp.host_done));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostDone",
+ be32_to_cpu(cmd_rsp.host_done));
if (len > sizeof(buf))
len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers queued",
- priv->debug.tx_stats.buf_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "Buffers completed",
- priv->debug.tx_stats.buf_completed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs queued",
- priv->debug.tx_stats.skb_queued);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs success",
- priv->debug.tx_stats.skb_success);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "SKBs failed",
- priv->debug.tx_stats.skb_failed);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "CAB queued",
- priv->debug.tx_stats.cab_queued);
-
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BE queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "BK queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VI queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
- len += snprintf(buf + len, sizeof(buf) - len,
- "%20s : %10u\n", "VO queued",
- priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs success",
+ priv->debug.tx_stats.skb_success);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs failed",
+ priv->debug.tx_stats.skb_failed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CAB queued",
+ priv->debug.tx_stats.cab_queued);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
- len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
+ len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
+ priv->debug.rx_stats.err_phy_stats[p]);
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += snprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "CRC ERR",
+ priv->debug.rx_stats.err_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT CRC ERR",
+ priv->debug.rx_stats.err_decrypt_crc);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "MIC ERR",
+ priv->debug.rx_stats.err_mic);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "PRE-DELIM CRC ERR",
+ priv->debug.rx_stats.err_pre_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "POST-DELIM CRC ERR",
+ priv->debug.rx_stats.err_post_delim);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "DECRYPT BUSY ERR",
+ priv->debug.rx_stats.err_decrypt_busy);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "TOTAL PHY ERR",
+ priv->debug.rx_stats.err_phy);
PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
priv->tx.tx_slot, MAX_TX_BUF_NUM);
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
- "Used slots : %d\n",
- bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "Used slots : %d\n",
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Failed queue", skb_queue_len(&priv->tx.tx_failed));
spin_lock_bh(&priv->tx.tx_lock);
- len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
- "Queued count", priv->tx.queued_cnt);
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Queued count", priv->tx.queued_cnt);
spin_unlock_bh(&priv->tx.tx_lock);
if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Major Version",
+ pBase->version >> 12);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Minor Version",
+ pBase->version & 0xFFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Checksum",
+ pBase->checksum);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "Length",
+ pBase->length);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain1",
+ pBase->regDmn[0]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n", "RegDomain2",
+ pBase->regDmn[1]);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Mask", pBase->txMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "RX Mask", pBase->rxMask);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 5GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Allow 2GHz",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 2GHz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT20",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Disable 5Ghz HT40",
+ !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Big Endian",
+ !!(pBase->eepMisc & 0x01));
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Major Ver",
+ (pBase->binBuildNumber >> 24) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Minor Ver",
+ (pBase->binBuildNumber >> 16) & 0xFF);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "Cal Bin Build",
+ (pBase->binBuildNumber >> 8) & 0xFF);
/*
* UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_header_4k *pBase4k =
&priv->ah->eeprom.map4k.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "TX Gain type",
+ pBase4k->txGainType);
}
/*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_ar9287_header *pBase9287 =
&priv->ah->eeprom.map9287.baseEepHeader;
- len += snprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10ddB\n",
+ "Power Table Offset",
+ pBase9287->pwrTableOffset);
- len += snprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10d\n",
+ "OpenLoop Power Ctrl",
+ pBase9287->openLoopPwrCntl);
}
- len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
if (len > size)
len = size;
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
do { \
if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
+ len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+ _s, (_val), "|"); \
} \
if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += snprintf(buf + len, size - len, "%9d\n", \
+ len += scnprintf(buf + len, size - len, "%9d\n",\
(_val)); \
} \
} while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += snprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
+ len += scnprintf(buf + len, size - len,
+ "%31s %15s\n", "2G", "5G");
+ len += scnprintf(buf + len, size - len,
+ "%32s %16s\n", "====", "====\n");
PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index d44258172c0f..9a2657fdd9cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,30 +24,10 @@
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
{
- enum htc_phymode mode;
-
- mode = -EINVAL;
-
- switch (ichan->chanmode) {
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- mode = HTC_MODE_11NG;
- break;
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- mode = HTC_MODE_11NA;
- break;
- default:
- break;
- }
+ if (IS_CHAN_5GHZ(ichan))
+ return HTC_MODE_11NA;
- WARN_ON(mode < 0);
-
- return mode;
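+ /* Anything that is not 5 GHz is treated as an 11ng channel. */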
+ return HTC_MODE_11NG;
}
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
WMI_CMD(WMI_FLUSH_RECV_CMDID);
/* setup initial channel */
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- &hw->conf.chandef);
-
+ ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 83f4927aeaca..4f9378ddf07f 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,22 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
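+/* TX99 hardware hooks; tx99_set_txpower is optional and NULL-checked. */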
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+ if (ath9k_hw_ops(ah)->tx99_set_txpower)
+ ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ecc6ec4a1edb..54b04155e43b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
unsigned int clockrate;
/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
clockrate = 117;
- else if (!ah->curchan) /* should really check for CCK instead */
+ else if (!chan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
- else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+ else if (IS_CHAN_2GHZ(chan))
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
- if (conf_is_ht40(conf))
+ if (IS_CHAN_HT40(chan))
clockrate *= 2;
if (ah->curchan) {
- if (IS_CHAN_HALF_RATE(ah->curchan))
+ if (IS_CHAN_HALF_RATE(chan))
clockrate /= 2;
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ if (IS_CHAN_QUARTER_RATE(chan))
clockrate /= 4;
}
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay)
{
- if (IS_CHAN_B(chan))
- hw_delay = (4 * hw_delay) / 22;
- else
- hw_delay /= 10;
+ hw_delay /= 10;
if (IS_CHAN_HALF_RATE(chan))
hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
return;
}
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+ if (IS_CHAN_HT40PLUS(chan)) {
centers->synth_center =
chan->channel + HT40_CHANNEL_CENTER_SHIFT;
extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath9k_hw_ani_init(ah);
+ /*
+ * EEPROM needs to be initialized before we do this.
+ * This is required for regulatory compliance.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ if ((regdmn & 0xF0) == CTL_FCC) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+ }
+ }
+
return 0;
}
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
int acktimeout, ctstimeout, ack_offset = 0;
int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->chandef.chan &&
- conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+ if (IS_CHAN_2GHZ(chan) &&
!IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
acktimeout += 64 - sifstime - ah->slottime;
ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
- if (IS_CHAN_B(chan))
- ctl |= CTL_11B;
- else if (IS_CHAN_G(chan))
+ if (IS_CHAN_2GHZ(chan))
ctl |= CTL_11G;
else
ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
int r;
if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
- u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
- band_switch = (cur != new);
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+ mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
}
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
return true;
}
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ val = REG_READ(ah, AR_NAV);
+ if (val != 0xdeadbeef && val > 0x7fff) {
+ ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+ REG_WRITE(ah, AR_NAV, 0);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
goto fail;
/*
- * If cross-band fcc is not supoprted, bail out if
- * either channelFlags or chanmode differ.
- *
- * chanmode will be different if the HT operating mode
- * changes because of CSA.
+	 * If cross-band fcc is not supported, bail out if channelFlags differ.
*/
- if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
-
- if (chan->chanmode != ah->curchan->chanmode)
- goto fail;
- }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+ chan->channelFlags != ah->curchan->channelFlags)
+ goto fail;
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* re-using are present.
*/
if (AR_SREV_9462(ah) && (ah->caldata &&
- (!ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_done)))
+ (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,15 +1878,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
- chan->channelFlags != caldata->channelFlags ||
- chan->chanmode != caldata->chanmode)) {
+ chan->channelFlags != caldata->channelFlags)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
} else if (caldata) {
- caldata->paprd_packet_sent = false;
+ clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
}
- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
if (fastcc) {
r = ath9k_hw_do_fastcc(ah, chan);
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_mfp(ah);
- if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
- ath9k_hw_set_delta_slope(ah, chan);
-
+ ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
u32 macmode;
- if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+ if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
macmode = AR_2040_JOINED_RX_CLEAR;
else
macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
- used = snprintf(hw_name, len,
- "Atheros AR%s Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev);
}
else {
- used = snprintf(hw_name, len,
- "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
- ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
- ah->hw_version.macRev,
- ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
- AR_RADIO_SREV_MAJOR)),
- ah->hw_version.phyRev);
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev,
+ ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+ & AR_RADIO_SREV_MAJOR)),
+ ah->hw_version.phyRev);
}
hw_name[used] = '\0';
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 69a907b55a73..9ea24f1cba73 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -98,8 +98,8 @@
#define PR_EEP(_s, _val) \
do { \
- len += snprintf(buf + len, size - len, "%20s : %10d\n", \
- _s, (_val)); \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
} while (0)
#define SM(_v, _f) (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@ enum ath9k_int {
ATH9K_INT_NOCARD = 0xffffffff
};
-#define CHANNEL_CCK 0x00020
-#define CHANNEL_OFDM 0x00040
-#define CHANNEL_2GHZ 0x00080
-#define CHANNEL_5GHZ 0x00100
-#define CHANNEL_PASSIVE 0x00200
-#define CHANNEL_DYN 0x00400
-#define CHANNEL_HALF 0x04000
-#define CHANNEL_QUARTER 0x08000
-#define CHANNEL_HT20 0x10000
-#define CHANNEL_HT40PLUS 0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL \
- (CHANNEL_OFDM| \
- CHANNEL_CCK| \
- CHANNEL_2GHZ | \
- CHANNEL_5GHZ | \
- CHANNEL_HT20 | \
- CHANNEL_HT40PLUS | \
- CHANNEL_HT40MINUS)
-
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+enum ath9k_cal_flags {
+ RTT_DONE,
+ PAPRD_PACKET_SENT,
+ PAPRD_DONE,
+ NFCAL_PENDING,
+ NFCAL_INTF,
+ TXIQCAL_DONE,
+ TXCLCAL_DONE,
+ SW_PKDET_DONE,
+};
+
struct ath9k_hw_cal_data {
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
+ unsigned long cal_flags;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
- bool rtt_done;
- bool paprd_packet_sent;
- bool paprd_done;
- bool nfcal_pending;
- bool nfcal_interference;
- bool done_txiqcal_once;
- bool done_txclcal_once;
+ u8 caldac[2];
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
u32 num_measures[AR9300_MAX_CHAINS];
@@ -430,33 +405,34 @@ struct ath9k_hw_cal_data {
struct ath9k_channel {
struct ieee80211_channel *chan;
u16 channel;
- u32 channelFlags;
- u32 chanmode;
+ u16 channelFlags;
s16 noisefloor;
};
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
- (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
- (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
- (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define CHANNEL_5GHZ BIT(0)
+#define CHANNEL_HALF BIT(1)
+#define CHANNEL_QUARTER BIT(2)
+#define CHANNEL_HT BIT(3)
+#define CHANNEL_HT40PLUS BIT(4)
+#define CHANNEL_HT40MINUS BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
- ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
-
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
- ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
- ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+ (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+ (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
enum ath9k_power_mode {
ATH9K_PM_AWAKE = 0,
@@ -558,6 +534,7 @@ struct ath_hw_antcomb_conf {
u8 main_gaintb;
u8 alt_gaintb;
int lna1_lna2_delta;
+ int lna1_lna2_switch_delta;
u8 div_group;
};
@@ -726,6 +703,10 @@ struct ath_hw_ops {
void (*spectral_scan_trigger)(struct ath_hw *ah);
void (*spectral_scan_wait)(struct ath_hw *ah);
+ void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+ void (*tx99_stop)(struct ath_hw *ah);
+ void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
#endif
@@ -1026,10 +1007,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
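A pattern that recurs through hw.c, hw.h and link.c in this patch is the replacement of the individual bool calibration fields with a single unsigned long cal_flags word, indexed by the new enum ath9k_cal_flags and manipulated with set_bit()/clear_bit()/test_bit(). A stand-alone sketch of the idiom, using hypothetical names rather than the driver's own:

    /* Sketch only; relies on the bitops helpers from <linux/bitops.h>. */
    enum demo_cal_flags {
    	DEMO_TXIQ_DONE,
    	DEMO_TXCL_DONE,
    	DEMO_RTT_DONE,
    };

    struct demo_caldata {
    	unsigned long cal_flags;	/* one bit per demo_cal_flags value */
    };

    static bool demo_can_reuse_cal(const struct demo_caldata *cal)
    {
    	/* stands in for the old done_txiqcal_once/done_txclcal_once bools */
    	return test_bit(DEMO_TXIQ_DONE, &cal->cal_flags) &&
    	       test_bit(DEMO_TXCL_DONE, &cal->cal_flags);
    }

    static void demo_invalidate_cal(struct demo_caldata *cal)
    {
    	clear_bit(DEMO_TXIQ_DONE, &cal->cal_flags);
    	clear_bit(DEMO_TXCL_DONE, &cal->cal_flags);
    }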
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 9a1f349f9260..d8643ebabd30 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
- struct ath_buf *bf;
int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
- if (!bf)
- return -ENOMEM;
+ if (is_tx) {
+ struct ath_buf *bf;
+
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ } else {
+ struct ath_rxbuf *bf;
- for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += (desc_len * ndesc);
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
+ bsize = sizeof(struct ath_rxbuf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
}
+ list_add_tail(&bf->list, head);
}
- list_add_tail(&bf->list, head);
}
return 0;
}
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+ if (sc->driver_data & ATH9K_PCI_CUS252)
+ ath_info(common, "CUS252 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+ ath_info(common, "WB335 1-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+ ath_info(common, "WB335 2-ANT card detected\n");
+
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -627,7 +680,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
sc->sc_ah = ah;
pCap = &ah->caps;
- sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
+ common = ath9k_hw_common(ah);
+ sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+ sc->tx99_power = MAX_RATE_POWER + 1;
if (!pdata) {
ah->ah_flags |= AH_USE_EEPROM;
@@ -641,7 +696,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->external_reset = pdata->external_reset;
}
- common = ath9k_hw_common(ah);
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
common->ah = ah;
@@ -732,6 +786,7 @@ err_queues:
ath9k_hw_deinit(ah);
err_hw:
ath9k_eeprom_release(sc);
+ dev_kfree_skb_any(sc->tx99_skb);
return ret;
}
@@ -748,7 +803,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
- ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+ ath9k_cmn_get_channel(sc->hw, ah, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@@ -789,9 +844,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
-
static const struct ieee80211_iface_limit if_dfs_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) },
};
static const struct ieee80211_iface_combination if_comb[] = {
@@ -850,17 +905,18 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_WDS) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_WDS) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
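When CONFIG_ATH9K_TX99 is built in, the patch stops advertising the normal interface modes and combinations, since TX99 operation only makes sense with a single test interface. config_enabled() resolves the Kconfig symbol at build time, so the skipped branch costs nothing at runtime. A minimal sketch of that compile-time gate, with a made-up option name:

    /* Sketch: CONFIG_DEMO_TEST_MODE is hypothetical, not a real Kconfig symbol. */
    static void demo_setup_modes(struct wiphy *wiphy)
    {
    	if (config_enabled(CONFIG_DEMO_TEST_MODE))
    		return;			/* test build: keep the mode list empty */

    	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
    				 BIT(NL80211_IFTYPE_AP);
    }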
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2f831db396ac..aed7e29dc50f 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -28,6 +28,13 @@ void ath_tx_complete_poll_work(struct work_struct *work)
int i;
bool needreset = false;
+
+ if (sc->tx99_state) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "skip tx hung detection on tx99\n");
+ return;
+ }
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
@@ -70,7 +77,7 @@ void ath_hw_check(struct work_struct *work)
ath9k_ps_wakeup(sc);
is_alive = ath9k_hw_check_alive(sc->sc_ah);
- if (is_alive && !AR_SREV_9300(sc->sc_ah))
+ if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
goto out;
else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
ath_dbg(common, RESET,
@@ -141,6 +148,9 @@ void ath_hw_pll_work(struct work_struct *work)
if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
return;
+ if (sc->tx99_state)
+ return;
+
ath9k_ps_wakeup(sc);
pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
ath9k_ps_restore(sc);
@@ -184,7 +194,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
- if (!caldata || !caldata->paprd_done) {
+ if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
}
@@ -256,7 +266,9 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
- if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+ if (!caldata ||
+ !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+ test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
}
@@ -316,7 +328,7 @@ void ath_paprd_calibrate(struct work_struct *work)
kfree_skb(skb);
if (chain_ok) {
- caldata->paprd_done = true;
+ set_bit(PAPRD_DONE, &caldata->cal_flags);
ath_paprd_activate(sc);
}
@@ -343,7 +355,7 @@ void ath_ani_calibrate(unsigned long data)
u32 cal_interval, short_cal_interval, long_cal_interval;
unsigned long flags;
- if (ah->caldata && ah->caldata->nfcal_interference)
+ if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
long_cal_interval = ATH_LONG_CALINTERVAL_INT;
else
long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +444,7 @@ set_timer:
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
- if (!ah->caldata->paprd_done) {
+ if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
} else if (!ah->paprd_table_write_done) {
ath9k_ps_wakeup(sc);
@@ -516,7 +528,8 @@ void ath_update_survey_nf(struct ath_softc *sc, int channel)
if (chan->noisefloor) {
survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = ath9k_hw_getchan_noise(ah, chan);
+ survey->noise = ath9k_hw_getchan_noise(ah, chan,
+ chan->noisefloor);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index a3eff0986a3f..6a18f9d3e9cc 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_channel *chan = ah->curchan;
struct ath9k_tx_queue_info *qi;
u32 cwMin, chanCwMin, value;
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
- if (chan && IS_CHAN_B(chan))
- chanCwMin = INIT_CWMIN_11B;
- else
- chanCwMin = INIT_CWMIN;
+ chanCwMin = INIT_CWMIN;
for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
} else
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index bfccaceed44e..e3eed81f2439 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
#define ATH9K_DECOMP_MASK_SIZE 128
-#define ATH9K_READY_TIME_LO_BOUND 50
-#define ATH9K_READY_TIME_HI_BOUND 96
enum ath9k_pkt_type {
ATH9K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 709301f88dcd..74f452c7b166 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -312,17 +312,91 @@ out:
* by reseting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath9k_channel *hchan;
+ struct ieee80211_channel *chan = chandef->chan;
+ unsigned long flags;
+ bool offchannel;
+ int pos = chan->hw_value;
+ int old_pos = -1;
int r;
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
+ offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+ if (ah->curchan)
+ old_pos = ah->curchan - &ah->channels[0];
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ chan->center_freq, chandef->width);
+
+ /* update survey stats for the old channel before switching */
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath9k_cmn_get_channel(hw, ah, chandef);
+
+ /*
+ * If the operating channel changes, change the survey in-use flags
+ * along with it.
+ * Reset the survey data for the new channel, unless we're switching
+ * back to the operating channel from an off-channel operation.
+ */
+ if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+ sc->cur_survey = &sc->survey[pos];
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+ memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+ }
+
+ hchan = &sc->sc_ah->channels[pos];
r = ath_reset_internal(sc, hchan);
+ if (r)
+ return r;
- return r;
+ /*
+ * The most recent snapshot of channel->noisefloor for the old
+ * channel is only available after the hardware reset. Copy it to
+ * the survey stats now.
+ */
+ if (old_pos >= 0)
+ ath_update_survey_nf(sc, old_pos);
+
+ /*
+ * Enable radar pulse detection if on a DFS channel. Spectral
+ * scanning and radar detection can not be used concurrently.
+ */
+ if (hw->conf.radar_enabled) {
+ u32 rxfilter;
+
+ /* set HW specific DFS configuration */
+ ath9k_hw_set_radar_params(ah);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR;
+ ath9k_hw_setrxfilter(ah, rxfilter);
+ ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+ chan->center_freq);
+ } else {
+ /* perform spectral scan if requested. */
+ if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+ }
+
+ return 0;
}
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -372,6 +446,13 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -410,10 +491,9 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
-out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
-
+out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
@@ -594,7 +674,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- init_channel = ath9k_cmn_get_curchannel(hw, ah);
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, false);
@@ -797,7 +877,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+ ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
ath9k_hw_phy_disable(ah);
@@ -816,7 +896,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG, "Driver halt\n");
}
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
{
switch (type) {
case NL80211_IFTYPE_AP:
@@ -966,6 +1046,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (sc->nvifs >= 1) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+ sc->tx99_vif = vif;
+ }
+
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -994,9 +1082,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ ath_dbg(common, CONFIG, "Change Interface\n");
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1026,6 +1120,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
sc->nvifs--;
+ sc->tx99_vif = NULL;
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1047,6 +1142,9 @@ static void ath9k_enable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = true;
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1063,6 +1161,9 @@ static void ath9k_disable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
sc->ps_enabled = false;
ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1086,6 +1187,9 @@ void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
struct ath_common *common = ath9k_hw_common(ah);
u32 rxfilter;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
ath_err(common, "spectrum analyzer not implemented on this hardware\n");
return;
@@ -1201,81 +1305,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- int pos = curchan->hw_value;
- int old_pos = -1;
- unsigned long flags;
-
- if (ah->curchan)
- old_pos = ah->curchan - &ah->channels[0];
-
- ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
- curchan->center_freq, hw->conf.chandef.width);
-
- /* update survey stats for the old channel before switching */
- spin_lock_irqsave(&common->cc_lock, flags);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- &conf->chandef);
-
- /*
- * If the operating channel changes, change the survey in-use flags
- * along with it.
- * Reset the survey data for the new channel, unless we're switching
- * back to the operating channel from an off-channel operation.
- */
- if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
- sc->cur_survey != &sc->survey[pos]) {
-
- if (sc->cur_survey)
- sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
- sc->cur_survey = &sc->survey[pos];
-
- memset(sc->cur_survey, 0, sizeof(struct survey_info));
- sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
- } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
- memset(&sc->survey[pos], 0, sizeof(struct survey_info));
- }
-
- if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+ if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return -EINVAL;
}
-
- /*
- * The most recent snapshot of channel->noisefloor for the old
- * channel is only available after the hardware reset. Copy it to
- * the survey stats now.
- */
- if (old_pos >= 0)
- ath_update_survey_nf(sc, old_pos);
-
- /*
- * Enable radar pulse detection if on a DFS channel. Spectral
- * scanning and radar detection can not be used concurrently.
- */
- if (hw->conf.radar_enabled) {
- u32 rxfilter;
-
- /* set HW specific DFS configuration */
- ath9k_hw_set_radar_params(ah);
- rxfilter = ath9k_hw_getrxfilter(ah);
- rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR;
- ath9k_hw_setrxfilter(ah, rxfilter);
- ath_dbg(common, DFS, "DFS enabled at freq %d\n",
- curchan->center_freq);
- } else {
- /* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
- }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1734,6 +1769,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
unsigned long flags;
int pos;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
spin_lock_irqsave(&common->cc_lock, flags);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1766,6 +1804,9 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
mutex_lock(&sc->mutex);
ah->coverage_class = coverage_class;
@@ -2332,6 +2373,134 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
sc->csa_vif = vif;
}
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
+
+void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (sc->sc_flags & SC_OP_INVALID) {
+ ath_err(common,
+ "driver is in invalid state unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ /* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
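The PN9Data table consumed by ath9k_build_tx99_skb() appears to be the standard PN9 whitening sequence (9-bit LFSR, polynomial x^9 + x^5 + 1, seeded with all ones, bits packed MSB-first). As a hedged illustration only, not part of the patch and only spot-checked against the first few table bytes, such a sequence could be generated like this:

    /* Sketch: PN9 generator (x^9 + x^5 + 1, seed 0x1ff). The first output
     * bytes come out as 0xff 0x87 0xb8 ..., matching the hardcoded table.
     */
    static void demo_pn9_fill(u8 *buf, int len)
    {
    	u16 state = 0x1ff;
    	int i, bit;

    	for (i = 0; i < len; i++) {
    		u8 byte = 0;

    		for (bit = 0; bit < 8; bit++) {
    			u8 fb = (state ^ (state >> 5)) & 1;

    			byte = (byte << 1) | (state & 1);	/* MSB first */
    			state = (state >> 1) | (fb << 8);
    		}
    		buf[i] = byte;
    	}
    }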
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 815bee21c19a..0ac1b5f04256 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
chan_start = wlan_chan - 10;
chan_end = wlan_chan + 10;
- if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ if (IS_CHAN_HT40PLUS(chan))
chan_end += 20;
- else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ else if (IS_CHAN_HT40MINUS(chan))
chan_start -= 20;
/* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
if (setchannel) {
struct ath9k_hw_cal_data *caldata = &sc->caldata;
- if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ if (IS_CHAN_HT40PLUS(ah->curchan) &&
(ah->curchan->channel > caldata->channel) &&
(ah->curchan->channel <= caldata->channel + 20))
return;
- if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ if (IS_CHAN_HT40MINUS(ah->curchan) &&
(ah->curchan->channel < caldata->channel) &&
(ah->curchan->channel >= caldata->channel - 20))
return;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index d089a7cf01c4..7e4c2524b630 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
- { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+ /* WB335 2-ANT / Antenna-Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(ATHEROS, 0x0036),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ 0 }
};
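The ordering of these new entries matters: the PCI core walks a driver's ID table from the top and uses the first entry that matches, so the subsystem-specific PCI_DEVICE_SUB() lines carrying per-card driver_data flags have to sit above the bare PCI_VDEVICE() catch-all for device 0x0036, which now comes last. A minimal sketch of the pattern, with an invented subsystem ID:

    /* Sketch only: the 0x9999 subsystem device ID below is made up. */
    static const struct pci_device_id demo_pci_table[] = {
    	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0036,
    			 PCI_VENDOR_ID_ATHEROS, 0x9999),
    	  .driver_data = 0x1 },			/* per-card quirk flags */
    	{ PCI_VDEVICE(ATHEROS, 0x0036) },	/* generic fallback, last */
    	{ 0 }
    };

    /* In probe(), the matched entry's flags arrive as id->driver_data. */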
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index d3d7c51fa6c8..d829bb62a3fc 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
int used_mcs = 0, used_htmode = 0;
if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = snprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
+ used_mcs = scnprintf(mcs, 5, "%d",
+ rc->rate_table->info[i].ratecode);
if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT40");
+ used_htmode = scnprintf(htmode, 5, "HT40");
else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = snprintf(htmode, 5, "HT20");
+ used_htmode = scnprintf(htmode, 5, "HT20");
else
- used_htmode = snprintf(htmode, 5, "????");
+ used_htmode = scnprintf(htmode, 5, "????");
}
mcs[used_mcs] = '\0';
htmode[used_htmode] = '\0';
- len += snprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
+ len += scnprintf(buf + len, max - len,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
+ stats->per);
}
if (len > max)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ab9e3a8410bc..95ddca5495d4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,7 +19,7 @@
#include "ath9k.h"
#include "ar9003_mac.h"
-#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
{
if (sc->rx.buf_hold)
ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
rx_edma = &sc->rx.rx_edma[qtype];
if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
return false;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
list_del_init(&bf->list);
skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (list_empty(&sc->rx.rxbuf)) {
ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
static void ath_rx_remove_buffer(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0, i;
u32 size;
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
ah->caps.rx_hp_qdepth);
- size = sizeof(struct ath_buf) * nbufs;
+ size = sizeof(struct ath_rxbuf) * nbufs;
bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_rx_edma_cleanup(sc);
@@ -375,6 +375,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
{
u32 rfilt;
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return 0;
+
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
@@ -427,7 +430,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
int ath_startrecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_buf *bf, *tbf;
+ struct ath_rxbuf *bf, *tbf;
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
ath_edma_start_recv(sc);
@@ -447,7 +450,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
ath9k_hw_rxena(ah);
@@ -603,13 +606,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype,
struct ath_rx_status *rs,
- struct ath_buf **dest)
+ struct ath_rxbuf **dest)
{
struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +656,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
return true;
}
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs,
enum ath9k_rx_qtype qtype)
{
- struct ath_buf *bf = NULL;
+ struct ath_rxbuf *bf = NULL;
while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
if (!bf)
@@ -668,13 +671,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
struct ath_rx_status *rs)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
int ret;
if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +685,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
if (bf == sc->rx.buf_hold)
return NULL;
@@ -702,7 +705,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
ret = ath9k_hw_rxprocdesc(ah, ds, rs);
if (ret == -EINPROGRESS) {
struct ath_rx_status trs;
- struct ath_buf *tbf;
+ struct ath_rxbuf *tbf;
struct ath_desc *tds;
memset(&trs, 0, sizeof(trs));
@@ -711,7 +714,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
- tbf = list_entry(bf->list.next, struct ath_buf, list);
+ tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
/*
* On some hardware the descriptor status words could
@@ -972,14 +975,15 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
{
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath_hw *ah = sc->sc_ah;
- u8 bins[SPECTRAL_HT20_NUM_BINS];
- u8 *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample;
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
struct ath_radar_info *radar_info;
- struct ath_ht20_mag_info *mag_info;
int len = rs->rs_datalen;
int dc_pos;
- u16 length, max_magnitude;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
- /* Variation in the data length is possible and will be fixed later.
- * Note that we only support HT20 for now.
- *
- * TODO: add HT20_40 support as well.
- */
- if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
- (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
- return 1;
-
- fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
- length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
- fft_sample.tlv.length = __cpu_to_be16(length);
+ chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
- fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
- fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- fft_sample.noise = ah->noise;
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
- switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+ switch (len - fft_len) {
case 0:
/* length correct, nothing to do. */
- memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+ memcpy(bins, vdata, num_bins);
break;
case -1:
/* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+ memcpy(&bins[1], vdata, num_bins - 1);
bins[0] = vdata[0];
break;
case 2:
/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
memcpy(bins, vdata, 30);
bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
break;
case 1:
/* MAC added 2 extra bytes AND first byte is missing. */
bins[0] = vdata[0];
- memcpy(&bins[0], vdata, 30);
+ memcpy(&bins[1], vdata, 30);
bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
break;
default:
return 1;
@@ -1044,23 +1047,93 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
- dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ dc_pos = num_bins / 2;
bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
- /* mag data is at the end of the frame, in front of radar_info */
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
- /* copy raw bins without scaling them */
- memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
- fft_sample.max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
- max_magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
- fft_sample.max_index = spectral_max_index(mag_info->all_bins);
- fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample.tsf = __cpu_to_be64(tsf);
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
- ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ ath_debug_send_fft_sample(sc, tlv);
return 1;
#else
return 0;
@@ -1308,7 +1381,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
- struct ath_buf *bf;
+ struct ath_rxbuf *bf;
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index fde6da619f30..0db37f230018 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -39,7 +39,7 @@ struct wmi_fw_version {
struct wmi_event_swba {
__be64 tsf;
u8 beacon_pending;
-};
+} __packed;
/*
* 64 - HTC header - WMI header - 1 / txstatus
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index dd30452df966..09cdbcd09739 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1241,12 +1241,13 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (bf->bf_next)
info.link = bf->bf_next->bf_daddr;
else
- info.link = 0;
+ info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
if (!bf_first) {
bf_first = bf;
- info.flags = ATH9K_TXDESC_INTREQ;
+ if (!sc->tx99_state)
+ info.flags = ATH9K_TXDESC_INTREQ;
if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
txq == sc->tx.uapsdq)
info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1704,16 +1705,9 @@ int ath_cabq_update(struct ath_softc *sc)
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- /*
- * Ensure the readytime % is within the bounds.
- */
- if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
- else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
- sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
qi.tqi_readyTime = (cur_conf->beacon_interval *
- sc->config.cabqReadytime) / 100;
+ ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
return 0;
@@ -1948,7 +1942,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
- if (!edma) {
+ if (!edma || sc->tx99_state) {
TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@@ -2027,6 +2021,9 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+
+ if (!rate)
+ return;
fi->rtscts_rate = rate->hw_value;
if (short_preamble)
fi->rtscts_rate |= rate->hw_value_short;
@@ -2037,8 +2034,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
- (curchan->channelFlags & CHANNEL_5GHZ) &&
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
(chainmask == 0x7) && (rate < 0x90))
return 0x3;
else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2329,7 +2325,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (sc->sc_ah->caldata)
- sc->sc_ah->caldata->paprd_packet_sent = true;
+ set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -2379,6 +2375,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
bf->bf_buf_addr = 0;
+ if (sc->tx99_state)
+ goto skip_tx_complete;
if (bf->bf_state.bfs_paprd) {
if (time_after(jiffies,
@@ -2391,6 +2389,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
ath_tx_complete(sc, skb, tx_flags, txq);
}
+skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
*/
@@ -2749,3 +2748,46 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ath_txq_unlock(sc, txq);
}
}
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_buf *bf;
+ int padpos, padsize;
+
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, XMIT,
+ "tx99 padding failed\n");
+ return -EINVAL;
+ }
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->framelen = skb->len + FCS_LEN;
+ fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+ bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+ if (!bf) {
+ ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+ return -EINVAL;
+ }
+
+ ath_set_rates(sc->tx99_vif, NULL, bf);
+
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+ ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+ ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c81fce..a1a69c5db409 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -19,7 +19,7 @@
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "ath9k.h"
+#include "ath.h"
/*
* tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
u32 sz, i;
struct channel_detector *cd;
- struct ath_common *common = ath9k_hw_common(dpd->ah);
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
if (cd == NULL)
@@ -167,7 +166,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
return cd;
fail:
- ath_dbg(common, DFS,
+ ath_dbg(dpd->common, DFS,
"failed to allocate channel_detector for freq=%d\n", freq);
channel_detector_exit(dpd, cd);
return NULL;
@@ -242,7 +241,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
struct pri_detector *pd = cd->detectors[i];
struct pri_sequence *ps = pd->add_pulse(pd, event);
if (ps != NULL) {
- ath_dbg(ath9k_hw_common(dpd->ah), DFS,
+ ath_dbg(dpd->common, DFS,
"DFS: radar found on freq=%d: id=%d, pri=%d, "
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
return false;
}
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+ return global_dfs_pool_stats;
+}
+
static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
enum nl80211_dfs_regions region)
{
@@ -284,14 +289,18 @@ static struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
+ .get_stats = dpd_get_stats,
.region = NL80211_DFS_UNSET,
};
struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
- struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
if (dpd == NULL)
@@ -300,7 +309,7 @@ dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
- dpd->ah = ah;
+ dpd->common = common;
if (dpd->set_dfs_domain(dpd, region))
return dpd;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abcc4265..dde2652b787c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -22,6 +22,19 @@
#include <linux/nl80211.h>
/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+ u32 pool_reference;
+ u32 pulse_allocated;
+ u32 pulse_alloc_error;
+ u32 pulse_used;
+ u32 pseq_allocated;
+ u32 pseq_alloc_error;
+ u32 pseq_used;
+};
+
+/**
* struct pulse_event - describing pulses reported by PHY
* @ts: pulse time stamp in us
* @freq: channel frequency in MHz
@@ -77,11 +90,12 @@ struct dfs_pattern_detector {
bool (*add_pulse)(struct dfs_pattern_detector *dpd,
struct pulse_event *pe);
+ struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
enum nl80211_dfs_regions region;
u8 num_radar_types;
u64 last_pulse_ts;
/* needed for ath_dbg() */
- struct ath_hw *ah;
+ struct ath_common *common;
const struct radar_detector_specs *radar_spec;
struct list_head channel_detectors;
@@ -92,15 +106,7 @@ struct dfs_pattern_detector {
* @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
* @return instance pointer on success, NULL otherwise
*/
-#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
extern struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
-#else
-static inline struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
-{
- return NULL;
-}
-#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
-
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region);
#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 5ba4b6fe37c0..43b608178884 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -17,10 +17,14 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "ath9k.h"
+#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
-#include "dfs_debug.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/**
* struct pulse_elem - elements in pulse queue
@@ -392,7 +396,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
- return false;
+ return NULL;
}
ps = pseq_handler_check_detection(de);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d1abc6..79f0fff4d1e6 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -19,6 +19,8 @@
#include <linux/list.h>
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
/**
* struct pri_sequence - sequence of pulses matching one PRI
* @head: list_head
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7d077c752dd5..c00687e05688 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -356,14 +356,131 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ u16 country_code;
+
+ if (!ath_is_world_regd(reg))
+ return -EINVAL;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ return -EINVAL;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+ return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (__ath_reg_dyn_country(wiphy, reg, request))
+ return;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x "
+ "dynamically updated by %s\n",
+ reg->current_rd,
+ reg_initiator_name(request->initiator));
+}
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static void ath_reg_dyn_country_user(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return;
+ if (!dynamic_country_user_possible(reg))
+ return;
+ ath_reg_dyn_country(wiphy, reg, request);
+}
+
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
- u16 country_code;
-
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -388,25 +505,12 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
sizeof(struct ath_regulatory));
break;
case NL80211_REGDOM_SET_BY_DRIVER:
+ break;
case NL80211_REGDOM_SET_BY_USER:
+ ath_reg_dyn_country_user(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (!ath_is_world_regd(reg))
- break;
-
- country_code = ath_regd_find_country_by_name(request->alpha2);
- if (country_code == (u16) -1)
- break;
-
- reg->current_rd = COUNTRY_ERD_FLAG;
- reg->current_rd |= country_code;
-
- printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
- reg->current_rd);
- __ath_regd_init(reg);
-
- ath_reg_apply_world_flags(wiphy, request->initiator, reg);
-
+ ath_reg_dyn_country(wiphy, reg, request);
break;
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 000000000000..591ebaea8265
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+ tristate "Qualcomm Atheros WCN3660/3680 support"
+ depends on MAC80211 && HAS_DMA
+ ---help---
+ This module adds support for wireless adapters based on
+ Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+ If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+ bool "WCN36XX debugfs support"
+ depends on WCN36XX
+ ---help---
+ Enable debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 000000000000..50c43b4382ba
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y += main.o \
+ dxe.o \
+ txrx.o \
+ smd.o \
+ pmc.o \
+ debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 000000000000..5b84f7ae0b1e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ char buf[3];
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ if (vif_priv->pw_state == WCN36XX_BMPS)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ break;
+ }
+ }
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ char buf[32];
+ int buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ }
+ }
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+ .open = simple_open,
+ .read = read_file_bool_bmps,
+ .write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ char buf[255], *tmp;
+ int buf_size;
+ u32 arg[WCN36xx_MAX_DUMP_ARGS];
+ int i;
+
+ memset(buf, 0, sizeof(buf));
+ memset(arg, 0, sizeof(arg));
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ tmp = buf;
+
+ for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+ char *begin;
+ begin = strsep(&tmp, " ");
+ if (begin == NULL)
+ break;
+
+ if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0)
+ break;
+ }
+
+ wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+ arg[3], arg[4]);
+ wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+ .open = simple_open,
+ .write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data) \
+ do { \
+ struct dentry *d; \
+ d = debugfs_create_file(__stringify(name), \
+ mode, dfs->rootdir, \
+ priv_data, fop); \
+ dfs->file_##name.dentry = d; \
+ if (IS_ERR(d)) { \
+ wcn36xx_warn("Create the debugfs entry failed");\
+ dfs->file_##name.dentry = NULL; \
+ } \
+ } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+ dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wcn->hw->wiphy->debugfsdir);
+ if (IS_ERR(dfs->rootdir)) {
+ wcn36xx_warn("Create the debugfs failed\n");
+ dfs->rootdir = NULL;
+ }
+
+ ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+ &fops_wcn36xx_bmps, wcn);
+ ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+ debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 000000000000..46307aa562d3
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS 5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+ struct dentry *dentry;
+ u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+ struct dentry *rootdir;
+ struct wcn36xx_dfs_file file_bmps_switcher;
+ struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 000000000000..ee25786b4447
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
+ * Data packets are transferred through the low-priority channels,
+ * management packets through the high-priority ones.
+ */
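+
+/* A rough usage sketch (hypothetical caller, names assumed): a data frame
+ * would be queued on the low-priority channel and a management frame on the
+ * high-priority one, e.g.
+ *	wcn36xx_dxe_tx_frame(wcn, vif_priv, skb,
+ *			     ieee80211_is_data(hdr->frame_control));
+ */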
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+ struct wcn36xx_dxe_ch *ch = is_low ?
+ &wcn->dxe_tx_l_ch :
+ &wcn->dxe_tx_h_ch;
+
+ return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+ *data = readl(wcn->mmio + addr);
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+ addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+ int i;
+
+ for (i = 0; i < ch->desc_num && ctl; i++) {
+ next = ctl->next;
+ kfree(ctl);
+ ctl = next;
+ }
+}
+
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ int i;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+ if (!cur_ctl)
+ goto out_fail;
+
+ cur_ctl->ctl_blk_order = i;
+ if (i == 0) {
+ ch->head_blk_ctl = cur_ctl;
+ ch->tail_blk_ctl = cur_ctl;
+ } else if (ch->desc_num - 1 == i) {
+ prev_ctl->next = cur_ctl;
+ cur_ctl->next = ch->head_blk_ctl;
+ } else {
+ prev_ctl->next = cur_ctl;
+ }
+ prev_ctl = cur_ctl;
+ }
+
+ return 0;
+
+out_fail:
+ wcn36xx_dxe_free_ctl_block(ch);
+ return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+ int ret;
+
+ wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+ wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+ wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+ wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+ wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+ wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+ wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
+ wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
+
+ wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+ wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+ wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+ wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+ wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+ wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+ wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+ wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+ /* DXE control block allocation */
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+ if (ret)
+ goto out_err;
+
+ /* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
+ ret = wcn->ctrl_ops->smsm_change_state(
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+ return 0;
+
+out_err:
+ wcn36xx_err("Failed to allocate DXE control blocks\n");
+ wcn36xx_dxe_free_ctl_blks(wcn);
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_desc *cur_dxe = NULL;
+ struct wcn36xx_dxe_desc *prev_dxe = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ size_t size;
+ int i;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
+ if (!wcn_ch->cpu_addr)
+ return -ENOMEM;
+
+ memset(wcn_ch->cpu_addr, 0, size);
+
+ cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ cur_ctl->desc = cur_dxe;
+ cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+ i * sizeof(struct wcn36xx_dxe_desc);
+
+ switch (wcn_ch->ch_type) {
+ case WCN36XX_DXE_CH_TX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+ break;
+ case WCN36XX_DXE_CH_TX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+ break;
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ }
+ if (0 == i) {
+ cur_dxe->phy_next_l = 0;
+ } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ } else if (i == (wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ cur_dxe->phy_next_l =
+ wcn_ch->head_blk_ctl->desc_phy_addr;
+ }
+ cur_ctl = cur_ctl->next;
+ prev_dxe = cur_dxe;
+ cur_dxe++;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+ struct wcn36xx_dxe_mem_pool *pool)
+{
+ int i, chunk_size = pool->chunk_size;
+ dma_addr_t bd_phy_addr = pool->phy_addr;
+ void *bd_cpu_addr = pool->virt_addr;
+ struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ /* Only every second DXE descriptor needs a BD pointer;
+ * the others point to the skb data. */
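+ /* Sketch of the resulting layout (derived from wcn36xx_dxe_tx_frame()
+ * below): the ring alternates BD and skb descriptors, and each frame
+ * consumes one such pair, BD descriptor first, skb descriptor second.
+ */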
+ if (!(i & 1)) {
+ cur->bd_phy_addr = bd_phy_addr;
+ cur->bd_cpu_addr = bd_cpu_addr;
+ bd_phy_addr += chunk_size;
+ bd_cpu_addr += chunk_size;
+ } else {
+ cur->bd_phy_addr = 0;
+ cur->bd_cpu_addr = NULL;
+ }
+ cur = cur->next;
+ }
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data |= wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+ return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ dxe->dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ ctl->skb = skb;
+
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ int i;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_ctl);
+ cur_ctl = cur_ctl->next;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+ int i;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ kfree_skb(cur->skb);
+ cur = cur->next;
+ }
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb) {
+ wcn36xx_warn("Spurious TX complete indication\n");
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (status == 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /*
+ * Run the do-while loop at least once: when the ring is completely
+ * full, head and tail point to the same element, and a plain while
+ * loop would not iterate at all.
+ */
+ do {
+ if (ctl->skb) {
+ dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ ctl->skb->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(ctl->skb);
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /* Keep frame until TX status comes */
+ ieee80211_free_txskb(wcn->hw, ctl->skb);
+ }
+ spin_lock_irqsave(&ctl->skb_lock, flags);
+ if (wcn->queues_stopped) {
+ wcn->queues_stopped = false;
+ ieee80211_wake_queues(wcn->hw);
+ }
+ spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+ ctl->skb = NULL;
+ }
+ ctl = ctl->next;
+ } while (ctl != ch->head_blk_ctl &&
+ !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+ ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+ int int_src, int_reason;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+ &int_reason);
+
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ }
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+ &int_reason);
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+ disable_irq_nosync(wcn->rx_irq);
+ wcn36xx_dxe_rx_frame(wcn);
+ enable_irq(wcn->rx_irq);
+ return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+ int ret;
+
+ ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+ IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc tx irq\n");
+ goto out_err;
+ }
+
+ ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+ "wcn36xx_rx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc rx irq\n");
+ goto out_txirq;
+ }
+
+ enable_irq_wake(wcn->rx_irq);
+
+ return 0;
+
+out_txirq:
+ free_irq(wcn->tx_irq, wcn);
+out_err:
+ return ret;
+
+}
+
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = ctl->skb;
+ dma_addr = dxe->dst_addr_l;
+ wcn36xx_dxe_fill_skb(ctl);
+
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ break;
+ default:
+ wcn36xx_warn("Unknown channel\n");
+ }
+
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ ctl = ctl->next;
+ dxe = ctl->desc;
+ }
+
+ ch->head_blk_ctl = ctl;
+
+ return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+ int int_src;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }
+
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+ size_t s;
+ void *cpu_addr;
+
+ /* Allocate BD headers for MGMT frames */
+
+ /* Where this value comes from is unclear; ask QC */
+ wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ /* Allocate BD headers for DATA frames */
+
+ /* Where this value comes from is unclear; ask QC */
+ wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->data_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ return 0;
+
+out_err:
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_err("Failed to allocate BD mempool\n");
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+ if (wcn->mgmt_mem_pool.virt_addr)
+ dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+ wcn->mgmt_mem_pool.virt_addr,
+ wcn->mgmt_mem_pool.phy_addr);
+
+ if (wcn->data_mem_pool.virt_addr) {
+ dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+ wcn->data_mem_pool.virt_addr,
+ wcn->data_mem_pool.phy_addr);
+ }
+}
+
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low)
+{
+ struct wcn36xx_dxe_ctl *ctl = NULL;
+ struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_ch *ch = NULL;
+ unsigned long flags;
+
+ ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+ ctl = ch->head_blk_ctl;
+
+ spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+ /*
+ * If the skb is not NULL, we have reached the tail of the ring and the
+ * ring is full. Stop the queues to let mac80211 back off until the
+ * ring has an empty slot again.
+ */
+ if (NULL != ctl->next->skb) {
+ ieee80211_stop_queues(wcn->hw);
+ wcn->queues_stopped = true;
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+ ctl->skb = NULL;
+ desc = ctl->desc;
+
+ /* Set source address of the BD we send */
+ desc->src_addr_l = ctl->bd_phy_addr;
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+ desc->ctrl = ch->ctrl_bd;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+ "BD >>> ", (char *)ctl->bd_cpu_addr,
+ sizeof(struct wcn36xx_tx_bd));
+
+ /* Set source address of the SKB we send */
+ ctl = ctl->next;
+ ctl->skb = skb;
+ desc = ctl->desc;
+ if (ctl->bd_cpu_addr) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ return -EINVAL;
+ }
+
+ desc->src_addr_l = dma_map_single(NULL,
+ ctl->skb->data,
+ ctl->skb->len,
+ DMA_TO_DEVICE);
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = ctl->skb->len;
+
+ /* set dxe descriptor to VALID */
+ desc->ctrl = ch->ctrl_skb;
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
+ (char *)ctl->skb->data, ctl->skb->len);
+
+ /* Move the head of the ring to the next empty descriptor */
+ ch->head_blk_ctl = ctl->next;
+
+ /*
+ * When connected and trying to send a data frame, the chip can be in
+ * sleep mode, in which case writing to the register will not wake it
+ * up. Instead, notify the chip about the new frame through the SMSM
+ * bus.
+ */
+ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+ wcn->ctrl_ops->smsm_change_state(
+ 0,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
+ } else {
+ /* indicate End Of Packet and generate interrupt on descriptor
+ * done.
+ */
+ wcn36xx_dxe_write_register(wcn,
+ ch->reg_ctrl, ch->def_ctrl);
+ }
+
+ return 0;
+}
+
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+ int reg_data = 0, ret;
+
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+ /* Setting interrupt path */
+ reg_data = WCN36XX_DXE_CCU_INT;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+ /***************************************/
+ /* Init descriptors for TX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+ wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX LOW */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+ WCN36XX_DXE_WQ_TX_L);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+ /***************************************/
+ /* Init descriptors for TX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+ wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX HIGH */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+ WCN36XX_DXE_WQ_TX_H);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+ /***************************************/
+ /* Init descriptors for RX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+ WCN36XX_DXE_WQ_RX_L);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_L,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+ /***************************************/
+ /* Init descriptors for RX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+ /* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+ WCN36XX_DXE_WQ_RX_H);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_H,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+ ret = wcn36xx_dxe_request_irqs(wcn);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+ free_irq(wcn->tx_irq, wcn);
+ free_irq(wcn->rx_irq, wcn);
+
+ if (wcn->tx_ack_skb) {
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ }
+
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 000000000000..c88562f85de1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW = DMA0
+TX_HIGH = DMA4
+RX_LOW = DMA1
+RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE 0x03000000
+#define WCN36XX_DXE_MEM_REG 0x202000
+
+#define WCN36XX_DXE_CCU_INT 0xA0011
+#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L 0x328a44
+#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L 0x17
+#define WCN36XX_DXE_WQ_TX_H 0x17
+#define WCN36XX_DXE_WQ_RX_L 0xB
+#define WCN36XX_DXE_WQ_RX_H 0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO: this must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
+ /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET 0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE descriptor destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE 128
+
+#define WCN36XX_PKT_SIZE 0xF20
+enum wcn36xx_dxe_ch_type {
+ WCN36XX_DXE_CH_TX_L,
+ WCN36XX_DXE_CH_TX_H,
+ WCN36XX_DXE_CH_RX_L,
+ WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
+};
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of following bits:
+ * union {
+ * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
+ * //descriptor
+ * u32 transfer_type :2; //0 = Host to Host space
+ * u32 eop :1; //End of Packet
+ * u32 bd_handling :1; //if transferType = Host to BMU, then 0
+ * // means first 128 bytes contain BD, and 1
+ * // means create new empty BD
+ * u32 siq :1; // SIQ
+ * u32 diq :1; // DIQ
+ * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
+ * // 1 = release them
+ * u32 bthld_sel :4; //BMU Threshold Select
+ * u32 prio :3; //Specifies the priority level to use for
+ * // the transfer
+ * u32 stop_channel :1; //1 = DMA stops processing further, channel
+ * //requires re-enabling after this
+ * u32 intr :1; //Interrupt on Descriptor Done
+ * u32 rsvd :1; //reserved
+ * u32 size :14;//14 bits used - ignored for BMU transfers,
+ * //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+ u32 ctrl;
+ u32 fr_len;
+
+ u32 src_addr_l;
+ u32 dst_addr_l;
+ u32 phy_next_l;
+ u32 src_addr_h;
+ u32 dst_addr_h;
+ u32 phy_next_h;
+} __packed;
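+
+/*
+ * Illustrative decode of the ctrl word, assuming the fields documented above
+ * are packed LSB-first (a sketch only; of the values below the driver defines
+ * just WCN36XX_DXE_CTRL_VALID_MASK):
+ *
+ *	valid = ctrl & 0x00000001;	      bit 0, matches VALID_MASK
+ *	eop   = (ctrl >> 3) & 0x1;	      End of Packet
+ *	intr  = (ctrl >> 16) & 0x1;	      Interrupt on Descriptor Done
+ *	size  = (ctrl >> 18) & 0x3fff;	      14-bit size field
+ */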
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+ struct wcn36xx_dxe_ctl *next;
+ struct wcn36xx_dxe_desc *desc;
+ unsigned int desc_phy_addr;
+ int ctl_blk_order;
+ struct sk_buff *skb;
+ spinlock_t skb_lock;
+ void *bd_cpu_addr;
+ dma_addr_t bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+ enum wcn36xx_dxe_ch_type ch_type;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ enum wcn36xx_dxe_ch_desc_num desc_num;
+ /* DXE control block ring */
+ struct wcn36xx_dxe_ctl *head_blk_ctl;
+ struct wcn36xx_dxe_ctl *tail_blk_ctl;
+
+ /* DXE channel specific configs */
+ u32 dxe_wq;
+ u32 ctrl_bd;
+ u32 ctrl_skb;
+ u32 reg_ctrl;
+ u32 def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+ int chunk_size;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
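+
+/*
+ * A rough sketch of the expected call order (not a contract enforced by this
+ * header): wcn36xx_dxe_allocate_mem_pools() and wcn36xx_dxe_alloc_ctl_blks()
+ * set up the BD pools and control block rings, wcn36xx_dxe_init() programs
+ * the channels and enables their interrupts, wcn36xx_dxe_tx_frame() then
+ * queues an skb on the low or high priority TX channel, and
+ * wcn36xx_dxe_deinit() plus the free helpers tear everything down again.
+ */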
+#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 000000000000..c02dbc618724
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+ API VERSIONING INFORMATION
+
+ The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+ The MAJOR is incremented for major product/architecture changes
+ (and then MINOR/VERSION/REVISION are zeroed)
+ The MINOR is incremented for minor product/architecture changes
+ (and then VERSION/REVISION are zeroed)
+ The VERSION is incremented if a significant API change occurs
+ (and then REVISION is zeroed)
+ The REVISION is incremented if an insignificant API change occurs
+ or if a new API is added
+ All values are in the range 0..255 (i.e. they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC 8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC 4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN 4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
+#define PARAM_llACOEXIST_CHANGED (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
+#define PARAM_RIFS_MODE_CHANGED (1<<8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
+#define PARAM_OBSS_MODE_CHANGED (1<<10)
+#define PARAM_BEACON_UPDATE_MASK \
+ (PARAM_BCN_INTERVAL_CHANGED | \
+ PARAM_SHORT_PREAMBLE_CHANGED | \
+ PARAM_SHORT_SLOT_TIME_CHANGED | \
+ PARAM_llACOEXIST_CHANGED | \
+ PARAM_llBCOEXIST_CHANGED | \
+ PARAM_llGCOEXIST_CHANGED | \
+ PARAM_HT20MHZCOEXIST_CHANGED | \
+ PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
+ PARAM_RIFS_MODE_CHANGED | \
+ PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
+ PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH 64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+ /* Init/De-Init */
+ WCN36XX_HAL_START_REQ = 0,
+ WCN36XX_HAL_START_RSP = 1,
+ WCN36XX_HAL_STOP_REQ = 2,
+ WCN36XX_HAL_STOP_RSP = 3,
+
+ /* Scan */
+ WCN36XX_HAL_INIT_SCAN_REQ = 4,
+ WCN36XX_HAL_INIT_SCAN_RSP = 5,
+ WCN36XX_HAL_START_SCAN_REQ = 6,
+ WCN36XX_HAL_START_SCAN_RSP = 7,
+ WCN36XX_HAL_END_SCAN_REQ = 8,
+ WCN36XX_HAL_END_SCAN_RSP = 9,
+ WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+ WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+ /* HW STA configuration/deconfiguration */
+ WCN36XX_HAL_CONFIG_STA_REQ = 12,
+ WCN36XX_HAL_CONFIG_STA_RSP = 13,
+ WCN36XX_HAL_DELETE_STA_REQ = 14,
+ WCN36XX_HAL_DELETE_STA_RSP = 15,
+ WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+ WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+ WCN36XX_HAL_DELETE_BSS_REQ = 18,
+ WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+ /* Infra STA association */
+ WCN36XX_HAL_JOIN_REQ = 20,
+ WCN36XX_HAL_JOIN_RSP = 21,
+ WCN36XX_HAL_POST_ASSOC_REQ = 22,
+ WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+ /* Security */
+ WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+ WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+ WCN36XX_HAL_SET_STAKEY_REQ = 26,
+ WCN36XX_HAL_SET_STAKEY_RSP = 27,
+ WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+ WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+ WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+ WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+ /* Qos Related */
+ WCN36XX_HAL_ADD_TS_REQ = 32,
+ WCN36XX_HAL_ADD_TS_RSP = 33,
+ WCN36XX_HAL_DEL_TS_REQ = 34,
+ WCN36XX_HAL_DEL_TS_RSP = 35,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+ WCN36XX_HAL_ADD_BA_REQ = 38,
+ WCN36XX_HAL_ADD_BA_RSP = 39,
+ WCN36XX_HAL_DEL_BA_REQ = 40,
+ WCN36XX_HAL_DEL_BA_RSP = 41,
+
+ WCN36XX_HAL_CH_SWITCH_REQ = 42,
+ WCN36XX_HAL_CH_SWITCH_RSP = 43,
+ WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+ WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+ WCN36XX_HAL_GET_STATS_REQ = 46,
+ WCN36XX_HAL_GET_STATS_RSP = 47,
+ WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+ WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+ WCN36XX_HAL_MISSED_BEACON_IND = 50,
+ WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+ WCN36XX_HAL_MIC_FAILURE_IND = 52,
+ WCN36XX_HAL_FATAL_ERROR_IND = 53,
+ WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+ /* NV Interface */
+ WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+ WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+ WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+ WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+ WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+ WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+ WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+ WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+ WCN36XX_HAL_SEND_BEACON_REQ = 63,
+ WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+ WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+ WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+ WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+ /* PTT interface support */
+ WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+ WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+ /* BTAMP related events */
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+ WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+ WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+ WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+ WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+ WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+ WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+ WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+ WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+ WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+ WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+ WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+ WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+ WCN36XX_HAL_GET_RSSI_REQ = 92,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+ WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+ WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+ WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+ WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+ WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+ WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+ WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+ WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+ WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+ WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+ WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+ WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+ WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+ WCN36XX_HAL_GET_RSSI_RSP = 113,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+ /* 11k related events */
+ WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+ WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+ /* 11R related msgs */
+ WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+ WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+ /* P2P WLAN_FEATURE_P2P */
+ WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+ WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+ /* WLAN Dump commands */
+ WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+ WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+ /* OEM_DATA FEATURE SUPPORT */
+ WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+ WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+ /* ADD SELF STA REQ and RSP */
+ WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+ WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+ /* DEL SELF STA SUPPORT */
+ WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+ WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+ /* Coex Indication */
+ WCN36XX_HAL_COEX_IND = 129,
+
+ /* Tx Complete Indication */
+ WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+ /* Host Suspend/resume messages */
+ WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+ WCN36XX_HAL_HOST_RESUME_REQ = 132,
+ WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+ WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+ WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+ WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+ WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+ WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+ WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+ WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+ WCN36XX_HAL_RADAR_DETECT_IND = 143,
+ WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+ WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+ WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+ /* PNO messages */
+ WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+ WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+ WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+ WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+ WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+ WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+ WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+ WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+ WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+ WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+ WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+ WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+ /*
+ * This is a temporary fix. Should be removed once Host and Riva code
+ * are in sync.
+ */
+ WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+ WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+ WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+ WCN36XX_HAL_TSM_STATS_REQ = 168,
+ WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+ /* wake reason indication (WOW) */
+ WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+ /* GTK offload support */
+ WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+ WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+ WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+ WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+ WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+ WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+ WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+ WCN36XX_HAL_DEL_BA_IND = 188,
+ WCN36XX_HAL_DHCP_START_IND = 189,
+ WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+ WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+ WCN36XX_HAL_MSG_VERSION0 = 0,
+ WCN36XX_HAL_MSG_VERSION1 = 1,
+ /* defined as 2-byte data */
+ WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+ WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+ DRIVER_TYPE_PRODUCTION = 0,
+ DRIVER_TYPE_MFG = 1,
+ DRIVER_TYPE_DVT = 2,
+ DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+ HAL_STOP_TYPE_SYS_RESET,
+ HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+ HAL_STOP_TYPE_RF_KILL,
+ HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+ HAL_SYS_MODE_NORMAL,
+ HAL_SYS_MODE_LEARN,
+ HAL_SYS_MODE_SCAN,
+ HAL_SYS_MODE_PROMISC,
+ HAL_SYS_MODE_SUSPEND_LINK,
+ HAL_SYS_MODE_ROAM_SCAN,
+ HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+ HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+ /* 20MHz IF bandwidth centered on IF carrier */
+ PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+ /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+ /* 40MHz IF bandwidth centered on IF carrier */
+ PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+ /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+ /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+ /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+ PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing (SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+ /* Static SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+ /* Dynamic SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+ /* reserved */
+ WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+ /* SM Power Save disabled */
+ WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+ WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+ STA_TAURUS = 0,
+ STA_TITAN,
+ STA_POLARIS,
+ STA_11b,
+ STA_11bg,
+ STA_11a,
+ STA_11n,
+ STA_11ac,
+ STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES 4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES 8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES 3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
+
+enum wcn36xx_hal_bss_type {
+ WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+ /* Added for softAP support */
+ WCN36XX_HAL_INFRA_AP_MODE,
+
+ WCN36XX_HAL_IBSS_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_STA_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_AP_MODE,
+
+ WCN36XX_HAL_AUTO_MODE,
+
+ WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+ WCN36XX_HAL_11A_NW_TYPE,
+ WCN36XX_HAL_11B_NW_TYPE,
+ WCN36XX_HAL_11G_NW_TYPE,
+ WCN36XX_HAL_11N_NW_TYPE,
+ WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
+
+enum wcn36xx_hal_ht_operating_mode {
+ /* No Protection */
+ WCN36XX_HAL_HT_OP_MODE_PURE,
+
+ /* Overlap Legacy device present, protection is optional */
+ WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+ /* No legacy device, but 20 MHz HT present */
+ WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+ /* Protection is required */
+ WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+ WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+ WCN36XX_HAL_ED_NONE,
+ WCN36XX_HAL_ED_WEP40,
+ WCN36XX_HAL_ED_WEP104,
+ WCN36XX_HAL_ED_TKIP,
+ WCN36XX_HAL_ED_CCMP,
+ WCN36XX_HAL_ED_WPI,
+ WCN36XX_HAL_ED_AES_128_CMAC,
+ WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN 16
+#define WLAN_WAPI_KEY_RSC_LEN 16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+ WCN36XX_HAL_TX_ONLY,
+ WCN36XX_HAL_RX_ONLY,
+ WCN36XX_HAL_TX_RX,
+ WCN36XX_HAL_TX_DEFAULT,
+ WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+ WCN36XX_HAL_WEP_STATIC,
+ WCN36XX_HAL_WEP_DYNAMIC,
+ WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+ WCN36XX_HAL_LINK_IDLE_STATE = 0,
+ WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+ WCN36XX_HAL_LINK_AP_STATE = 3,
+ WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+ /* BT-AMP Case */
+ WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+ WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+ WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+ WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+ /* Reserved for HAL Internal Use */
+ WCN36XX_HAL_LINK_LEARN_STATE = 9,
+ WCN36XX_HAL_LINK_SCAN_STATE = 10,
+ WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+ WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+ WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+ WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+ WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+ HAL_SUMMARY_STATS_INFO = 0x00000001,
+ HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+ BTAMP_EVENT_CONNECTION_START,
+ BTAMP_EVENT_CONNECTION_STOP,
+ BTAMP_EVENT_CONNECTION_TERMINATED,
+
+ /* This and beyond are invalid values */
+ BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+ PE_SUMMARY_STATS_INFO = 0x00000001,
+ PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ PE_PER_STA_STATS_INFO = 0x00000020,
+
+ /* This and beyond are invalid values */
+ PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID 0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
+#define WCN36XX_HAL_CFG_CAL_PERIOD 5
+#define WCN36XX_HAL_CFG_CAL_CONTROL 6
+#define WCN36XX_HAL_CFG_PROXIMITY 7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
+#define WCN36XX_HAL_CFG_FIXED_RATE 18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
+#define WCN36XX_HAL_CFG_STATS_PERIOD 32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
+#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+
+/* Message definitions - All the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+ u8 revision;
+ u8 version;
+ u8 minor;
+ u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+ u8 id;
+
+ /* 0 for multicast */
+ u8 unicast;
+
+ enum ani_key_direction direction;
+
+ /* Usage is unknown */
+ u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+ /* =1 for authenticator, =0 for supplicant */
+ u8 pae_role;
+
+ u16 length;
+ u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params Moving here since it is shared by
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+ /* STA Index */
+ u16 sta_index;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* STATIC/DYNAMIC - valid only for WEP */
+ enum ani_wep_type wep_type;
+
+ /* Default WEP key, valid only for static WEP, must be between 0 and 3. */
+ u8 def_wep_idx;
+
+ /* valid only for non-static WEP encryptions */
+ struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /*
+ * Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX
+ */
+ u8 single_tid_rc;
+
+} __packed;
+
+/* 8-byte control message header used by HAL */
+struct wcn36xx_hal_msg_header {
+ enum wcn36xx_hal_host_msg_type msg_type:16;
+ enum wcn36xx_hal_host_msg_version msg_version:16;
+ u32 len;
+} __packed;
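+
+/*
+ * As an illustration of how a message is framed (the stop request used here
+ * is defined further below; the convention shown is assumed, not mandated by
+ * this header): msg_type selects the request/response, msg_version selects
+ * the layout, and len is the total message length in bytes including this
+ * header.
+ *
+ *	struct wcn36xx_hal_mac_stop_req_msg msg;
+ *
+ *	msg.header.msg_type = WCN36XX_HAL_STOP_REQ;
+ *	msg.header.msg_version = WCN36XX_HAL_MSG_VERSION0;
+ *	msg.header.len = sizeof(msg);
+ *	msg.stop_req_params.reason = HAL_STOP_TYPE_SYS_DEEP_SLEEP;
+ */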
+
+/* Config format required by HAL for each CFG item*/
+struct wcn36xx_hal_cfg {
+ /* Cfg Id. The Id required by HAL is exported by HAL
+ * in shared header file between UMAC and HAL.*/
+ u16 id;
+
+ /* Length of the Cfg. This parameter is used to go to next cfg
+ * in the TLV format.*/
+ u16 len;
+
+ /* Padding bytes for unaligned addresses */
+ u16 pad_bytes;
+
+ /* Reserve bytes for making cfgVal to align address */
+ u16 reserve;
+
+ /* Following the uCfgLen field there should be 'uCfgLen' bytes
+ * containing the uCfgValue: u8 uCfgValue[uCfgLen] */
+} __packed;
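+
+/*
+ * Illustrative encoding of one TLV entry in the config buffer (the value is
+ * only an example): a 4-byte DTIM period of 1 would be laid out as
+ *
+ *	id        = WCN36XX_HAL_CFG_DTIM_PERIOD
+ *	len       = 4
+ *	pad_bytes = 0
+ *	reserve   = 0
+ *	value     = 01 00 00 00   (the 4 bytes following this struct)
+ *
+ * with the next struct wcn36xx_hal_cfg starting right after the value plus
+ * pad_bytes, so that each entry stays dword aligned.
+ */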
+
+struct wcn36xx_hal_mac_start_parameters {
+ /* Driver Type - Production or FTM etc. */
+ enum driver_type type;
+
+ /* Length of the config buffer */
+ u32 len;
+
+ /* Following this there is a TLV formatted buffer of length
+ * "len" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+ /* the TLV-formatted config buffer must start right after these fields */
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+ /* success or failure */
+ u16 status;
+
+ /* Max number of STA supported by the device */
+ u8 stations;
+
+ /* Max number of BSS supported by the device */
+ u8 bssids;
+
+ /* API Version */
+ struct wcnss_wlan_version version;
+
+ /* CRM build information */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+ /* hardware/chipset/misc version information */
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+ /* The reason for which the device is being stopped */
+ enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+ /*
+ * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+ * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+ */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+ u32 len;
+
+ /*
+ * Following this there is a TLV formatted buffer of length
+ * "uConfigBufferLen" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+
+} __packed;
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+ u8 subType:4;
+ u8 type:2;
+ u8 protVer:2;
+
+ u8 order:1;
+ u8 wep:1;
+ u8 moreData:1;
+ u8 powerMgmt:1;
+ u8 retry:1;
+ u8 moreFrag:1;
+ u8 fromDS:1;
+ u8 toDS:1;
+
+#else
+
+ u8 protVer:2;
+ u8 type:2;
+ u8 subType:4;
+
+ u8 toDS:1;
+ u8 fromDS:1;
+ u8 moreFrag:1;
+ u8 retry:1;
+ u8 powerMgmt:1;
+ u8 moreData:1;
+ u8 wep:1;
+ u8 order:1;
+
+#endif
+
+};
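+
+/*
+ * The bit names follow the IEEE 802.11 frame control field; for example a
+ * probe request would carry type = 0 (management) and subType = 4, while an
+ * encrypted data frame would have type = 2 with the wep bit set.
+ */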
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+ u8 fragNum:4;
+ u8 seqNumLo:4;
+ u8 seqNumHi:8;
+};
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+ struct wcn36xx_hal_mac_frame_ctl fc;
+ u8 durationLo;
+ u8 durationHi;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssId[6];
+ struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID 2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+ u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+ u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_len;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+ /* Single NoA usage in Scanning */
+ u8 use_noa;
+
+ /* Indicates the scan duration (in ms) */
+ u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to scan */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u32 start_tsf[2];
+ u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to stop scanning. Not really used, but
+ * retained for symmetry with the "start scan" message. It can also
+ * help in error check if needed. */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Identifies the operational state of the AP/STA
+ * LEARN - AP Role SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* Operating channel to tune to. */
+ u8 oper_channel;
+
+ /* Channel Bonding state If 20/40 MHz is operational, this will
+ * indicate the 40 MHz extension channel in combination with the
+ * control channel */
+ enum phy_chan_bond_state cb_state;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+ HW_RATE_INDEX_1MBPS = 0x82,
+ HW_RATE_INDEX_2MBPS = 0x84,
+ HW_RATE_INDEX_5_5MBPS = 0x8B,
+ HW_RATE_INDEX_6MBPS = 0x0C,
+ HW_RATE_INDEX_9MBPS = 0x12,
+ HW_RATE_INDEX_11MBPS = 0x96,
+ HW_RATE_INDEX_12MBPS = 0x18,
+ HW_RATE_INDEX_18MBPS = 0x24,
+ HW_RATE_INDEX_24MBPS = 0x30,
+ HW_RATE_INDEX_36MBPS = 0x48,
+ HW_RATE_INDEX_48MBPS = 0x60,
+ HW_RATE_INDEX_54MBPS = 0x6C
+};
+
+struct wcn36xx_hal_supported_rates {
+ /*
+ * For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * --this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * ON AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+ /* 11b, 11a and aniLegacyRates are IE rates which gives rate in
+ * unit of 500Kbps */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+ * supported). First 26 bits are reserved for those Titan rates and
+ * the last 4 bits (bit 28-31) for Taurus; 2 bits (bit 26-27) are
+ * reserved. */
+ /* Titan and Taurus Rates */
+ u32 enhanced_rate_bitmap;
+
+ /*
+ * 0-76 bits used, remaining reserved
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /*
+ * RX Highest Supported Data Rate defines the highest data
+ * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from "Supported MCS Set field" inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+} __packed;
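+
+/*
+ * Example values (the usual IEEE 802.11 information element encoding, in
+ * units of 500 kbps with bit 7 marking a basic rate): an 11b rate set of
+ * 1/2/5.5/11 Mbps would be dsss_rates[] = { 0x82, 0x84, 0x8b, 0x96 },
+ * matching the HW_RATE_INDEX values above.
+ */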
+
+struct wcn36xx_hal_config_sta_params {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* TODO move this parameter to the end for 3680 */
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* TODO add this parameter for 3680. */
+ /* Reserved to align next field on a dword boundary */
+ /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+ * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+ * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+ * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+ /* success or failure */
+ u32 status;
+
+ /* Station index; valid only when the 'status' field is SUCCESS */
+ u8 sta_index;
+
+ /* BSSID Index of BSS to which the station is associated */
+ u8 bssid_index;
+
+ /* DPU Index for PTK */
+ u8 dpu_index;
+
+ /* DPU Index for GTK */
+ u8 bcast_dpu_index;
+
+ /* DPU Index for IGTK */
+ u8 bcast_mgmt_dpu_idx;
+
+ /* PTK DPU signature */
+ u8 uc_ucast_sig;
+
+ /* GTK DPU signature */
+ u8 uc_bcast_sig;
+
+ /* IGTK DPU signature */
+ u8 uc_mgmt_sig;
+
+ u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Index of STA to delete */
+ u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Index of STA deleted */
+ u8 sta_id;
+} __packed;
+
+/* 12 Bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 bytes. */
+struct wcn36xx_hal_rate_set {
+ u8 num_rates;
+ u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:1;
+ u8 aci:2;
+ u8 acm:1;
+ u8 aifsn:4;
+#else
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 max:4;
+ u8 min:4;
+#else
+ u8 min:4;
+ u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+ struct wcn36xx_hal_aci_aifsn aci;
+ struct wcn36xx_hal_mac_cw cw;
+ u16 txop_limit;
+} __packed;
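+
+/*
+ * The 4-bit cw.min/cw.max fields presumably carry the ECWmin/ECWmax
+ * exponents as in the 802.11 EDCA Parameter Set element (CW = 2^ECW - 1),
+ * e.g. typical best-effort defaults of aifsn = 3, cw.min = 4, cw.max = 10,
+ * txop_limit = 0.
+ */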
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ * in the software system. */
+enum wcn36xx_hal_con_mode {
+ WCN36XX_HAL_STA_MODE = 0,
+
+ /* to support softAP mode. This is misleading;
+ it means AP mode only. */
+ WCN36XX_HAL_STA_SAP_MODE = 1,
+
+ WCN36XX_HAL_P2P_CLIENT_MODE,
+ WCN36XX_HAL_P2P_GO_MODE,
+ WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+ HAL_STA = 1,
+ HAL_SAP = 2,
+
+ /* to support sta, softAP mode. This means STA+AP mode */
+ HAL_STA_SAP = 3,
+
+ HAL_P2P_CLIENT = 4,
+ HAL_P2P_GO = 8,
+ HAL_MAX_CONCURRENCY_PERSONA = 4
+};
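+
+/*
+ * The values above combine as a bit mask; e.g. a station running alongside a
+ * P2P GO on the same device would be reported as HAL_STA | HAL_P2P_GO (0x9).
+ */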
+
+struct wcn36xx_hal_config_bss_params {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* TODO move sta to the end for 3680 */
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params sta;
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA, AP, GO, CLIENT; value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry, if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT operating mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA, AP, GO, CLIENT; value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index allocated by HAL */
+ u8 bss_index;
+
+ /* DPU descriptor index for PTK */
+ u8 dpu_desc_index;
+
+ /* PTK DPU signature */
+ u8 ucast_dpu_signature;
+
+ /* DPU descriptor index for GTK */
+ u8 bcast_dpu_desc_indx;
+
+ /* GTK DPU signature */
+ u8 bcast_dpu_signature;
+
+ /* DPU descriptor for IGTK */
+ u8 mgmt_dpu_desc_index;
+
+ /* IGTK DPU signature */
+ u8 mgmt_dpu_signature;
+
+ /* Station Index for BSS entry */
+ u8 bss_sta_index;
+
+ /* Self station index for this BSS */
+ u8 bss_self_sta_index;
+
+ /* Bcast station for buffering bcast frames in AP role */
+ u8 bss_bcast_sta_idx;
+
+ /* MAC Address of STA (PEER/SELF) in staContext of configBSSReq */
+ u8 mac[ETH_ALEN];
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index to be deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index that has been deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the BSSID to which STA is going to associate */
+ u8 bssid[ETH_ALEN];
+
+ /* Indicates the channel to switch to. */
+ u8 channel;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* link State */
+ enum wcn36xx_hal_link_state link_state;
+
+ /* Max TX power */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct wcn36xx_hal_config_sta_params sta_params;
+ struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct config_sta_rsp_params sta_rsp_params;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Number of keys */
+ u8 num_keys;
+
+ /* Array of keys. */
+ struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /* Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX */
+ u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+ struct wcn36xx_hal_set_bss_key_req_msg Msg;
+ u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from a BSS the station associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* STATIC/DYNAMIC. Used in Nullifying in Key Descriptors for
+ * Static/Dynamic keys */
+ enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA Index */
+ u16 sta_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* Whether to invalidate the Broadcast key or Unicast key. In case
+ * of WEP, the same key is used for both broadcast and unicast. */
+ u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ tSirMacAddr self_mac_addr;
+ u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Channel number */
+ u8 channel_number;
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ u8 tx_mgmt_power;
+
+ /* Max TX power */
+ u8 max_tx_power;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+	/* VO WIFI comment: BSSID needed to identify the session. As the
+	 * request has power constraints, it should be applied only to that
+	 * session. Since MTU timing and EDCA are sessionized, this struct
+	 * needs to be sessionized and bssid needs to be outside the VOWifi
+	 * feature flag. Important: keep the bssid field at the end of this
+	 * msg. It is used to maintain backward compatibility, since a new
+	 * host with old FW (or an old host with new FW) can simply ignore
+	 * the trailing field.
+	 */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Status */
+ u32 status;
+
+ /* Channel number - same as in request */
+ u8 channel_number;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+
+ /* BSSID needed to identify session - same as in request */
+ u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*BSS Index */
+ u16 bss_index;
+
+ /* Best Effort */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* Background */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* Video */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* Voice */
+ struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct dpu_stats_params {
+	/* Index of the STA to which the statistics belong */
+ u16 sta_index;
+
+ /* Encryption mode */
+ u8 enc_mode;
+
+ /* status */
+ u32 status;
+
+ /* Statistics */
+ u32 send_blocks;
+ u32 recv_blocks;
+ u32 replays;
+ u8 mic_error_cnt;
+ u32 prot_excl_cnt;
+ u16 format_err_cnt;
+ u16 un_decryptable_cnt;
+ u32 decrypt_err_cnt;
+ u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+
+ /* Categories of stats requested as specified in eHalStatsMask */
+ u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+ /* Total number of packets(per AC) that were successfully
+ * transmitted with retries */
+ u32 retry_cnt[4];
+
+ /* The number of MSDU packets and MMPDU frames per AC that the
+ * 802.11 station successfully transmitted after more than one
+ * retransmission attempt */
+ u32 multiple_retry_cnt[4];
+
+ /* Total number of packets(per AC) that were successfully
+ * transmitted (with and without retries, including multi-cast,
+ * broadcast) */
+ u32 tx_frm_cnt[4];
+
+ /* Total number of packets that were successfully received (after
+ * appropriate filter rules including multi-cast, broadcast) */
+ u32 rx_frm_cnt;
+
+ /* Total number of duplicate frames received successfully */
+ u32 frm_dup_cnt;
+
+	/* Total number of packets (per AC) that failed to transmit */
+ u32 fail_cnt[4];
+
+ /* Total number of RTS/CTS sequence failures for transmission of a
+ * packet */
+ u32 rts_fail_cnt;
+
+	/* Total number of packets that failed to transmit because no ACK
+	 * was received from the remote entity */
+ u32 ack_fail_cnt;
+
+ /* Total number of RTS/CTS sequence success for transmission of a
+ * packet */
+ u32 rts_succ_cnt;
+
+ /* The sum of the receive error count and dropped-receive-buffer
+ * error count. HAL will provide this as a sum of (FCS error) +
+ * (Fail get BD/PDU in HW) */
+ u32 rx_discard_cnt;
+
+ /*
+ * The receive error count. HAL will provide the RxP FCS error
+ * global counter. */
+ u32 rx_error_cnt;
+
+ /* The sum of the transmit-directed byte count, transmit-multicast
+ * byte count and transmit-broadcast byte count. HAL will sum TPE
+ * UC/MC/BCAST global counters to provide this. */
+ u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+ /* Legacy rates */
+ HAL_TX_RATE_LEGACY = 0x1,
+
+ /* HT20 rates */
+ HAL_TX_RATE_HT20 = 0x2,
+
+ /* HT40 rates */
+ HAL_TX_RATE_HT40 = 0x4,
+
+ /* Rate with Short guard interval */
+ HAL_TX_RATE_SGI = 0x8,
+
+ /* Rate with Long guard interval */
+ HAL_TX_RATE_LGI = 0x10
+};
+
+struct ani_global_class_a_stats_info {
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames */
+ u32 rx_frag_cnt;
+
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames when a promiscuous packet filter
+ * was enabled */
+ u32 promiscuous_rx_frag_cnt;
+
+ /* The receiver input sensitivity referenced to a FER of 8% at an
+ * MPDU length of 1024 bytes at the antenna connector. Each element
+ * of the array shall correspond to a supported rate and the order
+	 * shall be the same as the supportedRates parameter. */
+ u32 rx_input_sensitivity;
+
+	/* The maximum transmit power in dBm, up to one decimal place. For
+	 * example, if it is 10.5 dBm the value would be 105 */
+ u32 max_pwr;
+
+ /* Number of times the receiver failed to synchronize with the
+ * incoming signal after detecting the sync in the preamble of the
+ * transmitted PLCP protocol data unit. */
+ u32 sync_fail_cnt;
+
+ /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+ * recently transmitted frame */
+ u32 tx_rate;
+
+ /* mcs index for HT20 and HT40 rates */
+ u32 mcs_index;
+
+ /* to differentiate between HT20 and HT40 rates; short and long
+ * guard interval */
+ u32 tx_rate_flags;
+};
+
+struct ani_global_security_stats {
+ /* The number of unencrypted received MPDU frames that the MAC
+ * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+ * management information base (MIB) object is enabled */
+ u32 rx_wep_unencrypted_frm_cnt;
+
+	/* The number of received MSDU packets that the 802.11 station
+ * discarded because of MIC failures */
+ u32 rx_mic_fail_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a TKIP ICV error */
+ u32 tkip_icv_err;
+
+	/* The number of received MPDU frames that the 802.11 station
+	 * discarded because of an invalid AES-CCMP format */
+ u32 aes_ccmp_format_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of the AES-CCMP replay protection procedure */
+ u32 aes_ccmp_replay_cnt;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of errors detected by the AES-CCMP decryption
+ * algorithm */
+ u32 aes_ccmp_decrpt_err;
+
+ /* The number of encrypted MPDU frames received for which a WEP
+ * decryption key was not available on the 802.11 station */
+ u32 wep_undecryptable_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a WEP ICV error */
+ u32 wep_icv_err;
+
+ /* The number of received encrypted packets that the 802.11 station
+ * successfully decrypted */
+ u32 rx_decrypt_succ_cnt;
+
+ /* The number of encrypted packets that the 802.11 station failed
+ * to decrypt */
+ u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+ struct ani_global_security_stats uc_stats;
+ struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+ /* This counter shall be incremented for a received A-MSDU frame
+	 * with the station's MAC address in the address 1 field or an
+ * A-MSDU frame with a group address in the address 1 field */
+ u32 rx_amsdu_cnt;
+
+ /* This counter shall be incremented when the MAC receives an AMPDU
+ * from the PHY */
+ u32 rx_ampdu_cnt;
+
+ /* This counter shall be incremented when a Frame is transmitted
+ * only on the primary channel */
+ u32 tx_20_frm_cnt;
+
+ /* This counter shall be incremented when a Frame is received only
+ * on the primary channel */
+ u32 rx_20_frm_cnt;
+
+ /* This counter shall be incremented by the number of MPDUs
+ * received in the A-MPDU when an A-MPDU is received */
+ u32 rx_mpdu_in_ampdu_cnt;
+
+ /* This counter shall be incremented when an MPDU delimiter has a
+ * CRC error when this is the first CRC error in the received AMPDU
+ * or when the previous delimiter has been decoded correctly */
+ u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+ /* The number of MPDU frames that the 802.11 station transmitted
+ * and acknowledged through a received 802.11 ACK frame */
+ u32 tx_frag_cnt[4];
+
+ /* This counter shall be incremented when an A-MPDU is transmitted */
+ u32 tx_ampdu_cnt;
+
+ /* This counter shall increment by the number of MPDUs in the AMPDU
+ * when an A-MPDU is transmitted */
+ u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* STA Idx */
+ u32 sta_index;
+
+ /* Categories of STATS being returned as per eHalStatsMask */
+ u32 stats_mask;
+
+ /* message type is same as the request type */
+ u16 msg_type;
+
+ /* length of the entire request, includes the pStatsBuf length too */
+ u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ enum wcn36xx_hal_link_state state;
+ u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u16 ackPolicy:2;
+ u16 userPrio:3;
+ u16 psb:1;
+ u16 aggregation:1;
+ u16 accessPolicy:2;
+ u16 direction:2;
+ u16 tsid:4;
+ u16 trafficType:1;
+#else
+ u16 trafficType:1;
+ u16 tsid:4;
+ u16 direction:2;
+ u16 accessPolicy:2;
+ u16 aggregation:1;
+ u16 psb:1;
+ u16 userPrio:3;
+ u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:7;
+ u8 schedule:1;
+#else
+ u8 schedule:1;
+ u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+ struct wcn36xx_hal_ts_info_tfc traffic;
+ struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+ u8 type;
+ u8 length;
+ struct wcn36xx_hal_ts_info ts_info;
+ u16 nom_msdu_size;
+ u16 max_msdu_size;
+ u32 min_svc_interval;
+ u32 max_svc_interval;
+ u32 inact_interval;
+ u32 suspend_interval;
+ u32 svc_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_sz;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bw;
+ u16 medium_time;
+};
+
+struct add_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
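+
+/*
+ * Example of building the one-byte U-APSD bitmap used above (an
+ * illustrative sketch only; "req" names a local struct add_ts_req_msg).
+ * Per the encoding above, VO is bit 0, VI bit 1, BK bit 2 and BE bit 3,
+ * so enabling U-APSD for the voice and video ACs only would be:
+ *
+ *	req.uapsd = BIT(0) | BIT(1);	which is 0x03 (VO | VI)
+ */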
+
+struct add_rs_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct del_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To lookup station id using the mac address */
+ u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* Peer MAC Address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* ADDBA Action Frame dialog token
+ HAL will not interpret this object */
+ u8 dialog_token;
+
+	/* TID for which the BA is being set up.
+	   This identifies the TC or TS of interest */
+ u8 tid;
+
+ /* 0 - Delayed BA (Not supported)
+ 1 - Immediate BA */
+ u8 policy;
+
+ /* Indicates the number of buffers for this TID (baTID)
+ NOTE - This is the requested buffer size. When this
+ is processed by HAL and subsequently by HDD, it is
+ possible that HDD may change this buffer size. Any
+ change in the buffer size should be noted by PE and
+	   advertised appropriately in the ADDBA response */
+ u16 buffer_size;
+
+	/* BA timeout in TUs; 0 means no timeout will occur */
+ u16 timeout;
+
+ /* b0..b3 - Fragment Number - Always set to 0
+ b4..b15 - Starting Sequence Number of first MSDU
+ for which this BA is setup */
+ u16 ssn;
+
+ /* ADDBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
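+
+/*
+ * Example of packing the ssn field above (an illustrative sketch;
+ * "seq" is a hypothetical 12-bit 802.11 sequence number). The
+ * fragment number in b0..b3 is always 0, so:
+ *
+ *	req.ssn = (seq & 0x0fff) << 4;
+ */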
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+
+ /* TID for which the BA session has been setup */
+ u8 ba_tid;
+
+ /* BA Buffer Size allocated for the current BA session */
+ u8 ba_buffer_size;
+
+ u8 ba_session_id;
+
+ /* Reordering Window buffer */
+ u8 win_size;
+
+	/* Station index identifying the STA */
+ u8 sta_index;
+
+ /* Starting Sequence Number */
+ u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* Reorder Window Size */
+ u8 win_size;
+/* Old FW 1.2.2.4 does not support this*/
+#ifdef FEATURE_ON_CHIP_REORDERING
+ u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+ u16 ba_enable:1;
+ u16 starting_seq_num:12;
+ u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+ u8 sta_addr[ETH_ALEN];
+ struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+ u8 sta_index;
+ u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Candidate List(tTriggerBaCandidate)
+ */
+ u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+
+ /* baCandidateCnt is followed by trigger BA
+ * Rsp Candidate List(tTriggerRspBaCandidate)
+ */
+ u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TID for which the BA session is being deleted */
+ u8 tid;
+
+ /* DELBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Traffic Id */
+ u8 tid;
+
+ u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ /* Uplink Packet Queue delay */
+ u16 uplink_pkt_queue_delay;
+
+ /* Uplink Packet Queue delay histogram */
+ u16 uplink_pkt_queue_delay_hist[4];
+
+ /* Uplink Packet Transmit delay */
+ u32 uplink_pkt_tx_delay;
+
+ /* Uplink Packet loss */
+ u16 uplink_pkt_loss;
+
+ /* Uplink Packet count */
+ u16 uplink_pkt_count;
+
+ /* Roaming count */
+ u8 roaming_count;
+
+ /* Roaming Delay */
+ u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*bssid of the keys */
+ u8 bssidx;
+ u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+ /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+ * messages should be
+ * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+ * nv_img_buffer_size */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Fragment sequence number of the NV Image. Note that NV Image
+ * might not fit into one message due to size limitation of the SMD
+ * channel FIFO. UMAC can hence choose to chop the NV blob into
+	 * multiple fragments starting with sequence number 0, 1, 2 etc.
+ * The last fragment MUST be indicated by marking the
+ * isLastFragment field to 1. Note that all the NV blobs would be
+ * concatenated together by HAL without any padding bytes in
+ * between.*/
+ u16 frag_number;
+
+ /* Is this the last fragment? When set to 1 it indicates that no
+ * more fragments will be sent by UMAC and HAL can concatenate all
+ * the NV blobs rcvd & proceed with the parsing. HAL would generate
+ * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+ * after it receives each fragment */
+ u16 last_fragment;
+
+ /* NV Image size (number of bytes) */
+ u32 nv_img_buffer_size;
+
+ /* Following the 'nv_img_buffer_size', there should be
+ * nv_img_buffer_size bytes of NV Image i.e.
+ * u8[nv_img_buffer_size] */
+} __packed;
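+
+/*
+ * Example of sizing one NV download fragment as described above (an
+ * illustrative sketch; "msg", "i", "num_frags" and "frag_len" are
+ * hypothetical locals). The fragment payload is copied immediately
+ * after the structure:
+ *
+ *	msg.header.len = sizeof(msg) + frag_len;
+ *	msg.frag_number = i;
+ *	msg.last_fragment = (i == num_frags - 1);
+ *	msg.nv_img_buffer_size = frag_len;
+ */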
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure. HAL would generate a
+ * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+ /* Note: The length specified in tHalNvStoreInd messages should be
+ * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+ struct wcn36xx_hal_msg_header header;
+
+ /* NV Item */
+ u32 table_id;
+
+ /* Size of NV Blob */
+ u32 nv_blob_size;
+
+ /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+ * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet
+ */
+struct mic_failure_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+
+ /* address used to compute MIC */
+ u8 src_addr[ETH_ALEN];
+
+ /* transmitter address */
+ u8 ta_addr[ETH_ALEN];
+
+ u8 dst_addr[ETH_ALEN];
+
+ u8 multicast;
+
+ /* first byte of IV */
+ u8 iv1;
+
+ /* second byte of IV */
+ u8 key_id;
+
+ /* sequence number */
+ u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+ /* receive address */
+ u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 op_mode;
+ u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+};
+
+struct update_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* shortPreamble mode. HAL should update all the STA rates when it
+ * receives this message */
+ u8 short_preamble;
+
+ /* short Slot time. */
+ u8 short_slot_time;
+
+ /* Beacon Interval */
+ u16 beacon_interval;
+
+ /* Protection related */
+ u8 lla_coexist;
+ u8 llb_coexist;
+ u8 llg_coexist;
+ u8 ht20_coexist;
+ u8 lln_non_gf_coexist;
+ u8 lsig_tx_op_protection_full_support;
+ u8 rifs_mode;
+
+ u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* length of the template. */
+ u32 beacon_length;
+
+ /* Beacon data. */
+ u8 beacon[BEACON_TEMPLATE_SIZE];
+
+ u8 bssid[ETH_ALEN];
+
+ /* TIM IE offset from the beginning of the template. */
+ u32 tim_ie_offset;
+
+	/* P2P IE offset from the beginning of the template */
+ u16 p2p_ie_offset;
+} __packed;
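+
+/*
+ * Example of filling the template above (an illustrative sketch; "skb"
+ * holds a beacon frame built elsewhere and "tim_off" is the offset of
+ * its TIM element, both hypothetical here, with skb->len assumed not to
+ * exceed BEACON_TEMPLATE_SIZE):
+ *
+ *	msg.beacon_length = skb->len;
+ *	memcpy(msg.beacon, skb->data, skb->len);
+ *	msg.tim_ie_offset = tim_off;
+ */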
+
+struct send_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Link Parameters */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* channel number on which the RADAR was detected */
+ u8 channel_number;
+
+	/* RADAR pulse width in microseconds */
+ u16 radar_pulse_width;
+
+ /* Number of RADAR pulses */
+ u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta[ETH_ALEN];
+ u8 dialog_token;
+ u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+ u32 probe_resp_template_len;
+ u32 proxy_probe_req_valid_ie_bmap[8];
+ u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 aid;
+ u16 sta_id;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* HAL copies bssid from the sta table. */
+ u8 addr2[ETH_ALEN];
+
+	/* To unify the keepalive / unknown A2 / tim-based disassociation */
+ u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+ struct wcn36xx_hal_msg_header header;
+ u8 aid;
+ u8 sta_index;
+ u8 bss_index;
+ u8 reason_code;
+ u32 status;
+};
+
+struct bt_amp_event_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+ u64 tbtt;
+#endif
+ u8 dtim_count;
+
+ /* DTIM period given to HAL during association may not be valid, if
+ * association is based on ProbeRsp instead of beacon. */
+ u8 dtim_period;
+
+ /* For CCX and 11R Roaming */
+ u32 rssi_filter_period;
+
+ u32 num_beacon_per_rssi_average;
+ u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 send_data_null;
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The add-beacon-filter request below (wcn36xx_hal_add_bcn_filter_req_msg)
+ * is followed by ie_num instances of the following structure; see the
+ * layout sketch after these structures.
+ */
+struct beacon_filter_ie {
+ u8 element_id;
+ u8 check_ie_presence;
+ u8 offset;
+ u8 value;
+ u8 bitmask;
+ u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 capability_info;
+ u16 capability_mask;
+ u16 beacon_interval;
+ u16 ie_num;
+ u8 bss_index;
+ u8 reserved;
+};
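+
+/*
+ * Layout sketch for the beacon filter request (illustrative only): the
+ * message body is the structure above with ie_num instances of struct
+ * beacon_filter_ie appended directly after it, so the total length is
+ * presumably
+ *
+ *	sizeof(struct wcn36xx_hal_add_bcn_filter_req_msg) +
+ *		ie_num * sizeof(struct beacon_filter_ie)
+ */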
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 ie_Count;
+ u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
+#define WCN36XX_HAL_IPV6_ADDR_LEN 16
+#define WCN36XX_HAL_OFFLOAD_DISABLE 0
+#define WCN36XX_HAL_OFFLOAD_ENABLE 1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
+	(WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+ u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+	/* Only 2 possible Network Advertisement IPv6 addresses are supported */
+ u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ u8 self_addr[ETH_ALEN];
+ u8 src_ipv6_addr_valid:1;
+ u8 target_ipv6_addr1_valid:1;
+ u8 target_ipv6_addr2_valid:1;
+ u8 reserved1:5;
+
+ /* make it DWORD aligned */
+ u8 reserved2;
+
+ /* slot index for this offload */
+ u32 slot_index;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+ u8 offload_Type;
+
+ /* enable or disable */
+ u8 enable;
+
+ union {
+ u8 host_ipv4_addr[4];
+ u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ } u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_host_offload_req host_offload_params;
+ struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
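+
+/*
+ * Example of enabling ARP reply offload with the definitions above (an
+ * illustrative sketch; "req" is a local wcn36xx_hal_host_offload_req
+ * and "ip" the host IPv4 address in network byte order, both
+ * hypothetical):
+ *
+ *	req.offload_Type = WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+ *	req.enable = WCN36XX_HAL_OFFLOAD_ENABLE;
+ *	memcpy(req.u.host_ipv4_addr, &ip, 4);
+ */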
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 packet_type;
+ u32 time_period;
+ u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 bss_index;
+} __packed;
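+
+/*
+ * Example of a periodic NULL-frame keep-alive request using the
+ * definitions above (an illustrative sketch; "msg" is a local
+ * wcn36xx_hal_keep_alive_req_msg):
+ *
+ *	msg.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ *	msg.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ */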
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ s8 threshold1:8;
+ s8 threshold2:8;
+ s8 threshold3:8;
+ u8 thres1_pos_notify:1;
+ u8 thres1_neg_notify:1;
+ u8 thres2_pos_notify:1;
+ u8 thres2_neg_notify:1;
+ u8 thres3_pos_notify:1;
+ u8 thres3_neg_notify:1;
+ u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bk_delivery:1;
+ u8 be_delivery:1;
+ u8 vi_delivery:1;
+ u8 vo_delivery:1;
+ u8 bk_trigger:1;
+ u8 be_trigger:1;
+ u8 vi_trigger:1;
+ u8 vo_trigger:1;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID */
+ u8 id;
+
+ /* Pattern byte offset from beginning of the 802.11 packet to start
+ * of the wake-up pattern */
+ u8 byte_Offset;
+
+ /* Non-Zero Pattern size */
+ u8 size;
+
+ /* Pattern */
+ u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Non-zero pattern mask size */
+ u8 mask_size;
+
+ /* Pattern mask */
+ u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern */
+ u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern mask */
+ u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID of the wakeup pattern to be deleted */
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enables/disables magic packet filtering */
+ u8 magic_packet_enable;
+
+ /* Magic pattern */
+ u8 magic_pattern[ETH_ALEN];
+
+ /* Enables/disables packet pattern filtering in firmware. Enabling
+ * this flag enables broadcast pattern matching in Firmware. If
+ * unicast pattern matching is also desired,
+	 * ucUcastPatternFilteringEnable flag must be set to true as well
+ */
+ u8 pattern_filtering_enable;
+
+ /* Enables/disables unicast packet pattern filtering. This flag
+ * specifies whether we want to do pattern match on unicast packets
+ * as well and not just broadcast packets. This flag has no effect
+ * if the ucPatternFilteringEnable (main controlling flag) is set
+ * to false
+ */
+ u8 ucast_pattern_filtering_enable;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Channel Switch
+ * Action Frame.
+ */
+ u8 wow_channel_switch_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the
+ * Deauthentication Frame.
+ */
+ u8 wow_deauth_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Disassociation
+ * Frame.
+ */
+ u8 wow_disassoc_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it has missed consecutive
+ * beacons. This is a hardware register configuration (NOT a
+ * firmware configuration).
+ */
+ u8 wow_max_missed_beacons;
+
+ /* This configuration is valid only when magicPktEnable=1. This is
+ * a timeout value in units of microsec. It requests hardware to
+ * unconditionally wake up after it has stayed in WoWLAN mode for
+ * some time. Set 0 to disable this feature.
+ */
+ u8 wow_max_sleep;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAP-ID requests embedded in EAPOL frames and use this as a wake
+ * source.
+ */
+ u8 wow_eap_id_request_enable;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAPOL-4WAY requests and use this as a wake source.
+ */
+ u8 wow_eapol_4way_enable;
+
+	/* This configuration allows a host wakeup on a network scan
+ * offload match.
+ */
+ u8 wow_net_scan_offload_match;
+
+ /* This configuration allows a host wakeup on any GTK rekeying
+ * error.
+ */
+ u8 wow_gtk_rekey_error;
+
+ /* This configuration allows a host wakeup on BSS connection loss.
+ */
+ u8 wow_bss_connection_loss;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA index */
+ u8 sta_idx;
+
+ /* Access Category */
+ u8 ac;
+
+ /* User Priority */
+ u8 up;
+
+ /* Service Interval */
+ u32 service_interval;
+
+ /* Suspend Interval */
+ u32 suspend_interval;
+
+ /* Delay Interval */
+ u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 set_mcst_bcst_filter_setting;
+ u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 rssi_thres1_pos_cross:1;
+ u32 rssi_thres1_neg_cross:1;
+ u32 rssi_thres2_pos_cross:1;
+ u32 rssi_thres2_neg_cross:1;
+ u32 rssi_thres3_pos_cross:1;
+ u32 rssi_thres3_neg_cross:1;
+ u32 avg_rssi:8;
+ u32 reserved:18;
+
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ s8 rssi;
+
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 sta_id;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_max_tx_pwr_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSID is needed to identify which session issued this request.
+ * As the request has power constraints, this should be applied
+ * only to that session */
+ u8 bssid[ETH_ALEN];
+
+ u8 self_addr[ETH_ALEN];
+
+ /* In request, power == MaxTx power to be used. */
+ u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* power == tx power used for management frames */
+ u8 power;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* TX power in milliwatts */
+ u32 tx_power;
+
+ u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+	/* TX power in milliwatts */
+ u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 opp_ps;
+ u32 ct_window;
+ u8 count;
+ u32 duration;
+ u32 interval;
+ u32 single_noa_duration;
+ u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Self STA Index */
+ u8 self_sta_index;
+
+ /* DPU Index (IGTK, PTK, GTK all same) */
+ u8 dpu_index;
+
+ /* DPU Signature */
+ u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*success or failure */
+ u32 status;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_idx;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+ * This will carry the bitmap with the bit positions representing
+	 * different ACs */
+ u16 tspec_index;
+
+ /* Tspec info per AC To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status0;
+
+ /* FIXME PRIMA for future use for 11R */
+ u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+	/* Length of the response message */
+ u32 rsp_length;
+
+	/* FIXME: currently assuming the response will be less than
+	 * 100 bytes */
+ u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Coex Indication Type */
+ u32 type;
+
+ /* Coex Indication Data */
+ u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Tx Complete Indication Success or Failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 configured_mcst_bcst_filter_setting;
+ u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 dot11_exclude_unencrypted;
+ u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 index;
+ u8 opp_ps_flag;
+ u16 ctwin;
+
+ u16 noa1_interval_count;
+ u16 bss_index;
+ u32 noa1_duration;
+ u32 noa1_interval;
+ u32 noa1_starttime;
+
+ u16 noa2_interval_count;
+ u16 reserved2;
+ u32 noa2_duration;
+ u32 noa2_interval;
+ u32 noa2_start_time;
+
+ u32 status;
+};
+
+struct noa_start_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 sta_idx;
+
+ /* Peer MAC Address, whose BA session has timed out */
+ u8 peer_addr[ETH_ALEN];
+
+ /* TID for which a BA session timeout is being triggered */
+ u8 ba_tid;
+
+ /* DELBA direction
+ * 1 - Originator
+ * 0 - Recipient
+ */
+ u8 direction;
+
+ u32 reason_code;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
+
+/* Maximum number of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+ PNO_MODE_IMMEDIATE,
+ PNO_MODE_ON_SUSPEND,
+ PNO_MODE_ON_RESUME,
+ PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+ AUTH_TYPE_ANY = 0,
+ AUTH_TYPE_OPEN_SYSTEM = 1,
+
+ /* Upper layer authentication types */
+ AUTH_TYPE_WPA = 2,
+ AUTH_TYPE_WPA_PSK = 3,
+
+ AUTH_TYPE_RSN = 4,
+ AUTH_TYPE_RSN_PSK = 5,
+ AUTH_TYPE_FT_RSN = 6,
+ AUTH_TYPE_FT_RSN_PSK = 7,
+ AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+ AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+ AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+ ED_ANY = 0,
+ ED_NONE = 1,
+ ED_WEP = 2,
+ ED_TKIP = 3,
+ ED_CCMP = 4,
+ ED_WPI = 5,
+
+ ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast type */
+enum ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+
+ BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Description of a network that PNO has to look for */
+struct network_type {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+	/* Indicates the channels on which the network can be found;
+	 * a count of 0 means all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+struct scan_timer {
+	/* How long it should wait */
+ u32 value;
+
+	/* How many times it should repeat that wait; a value of 0 means
+	 * keep using this timer until PNO is disabled */
+ u32 repeat;
+
+ /* e.g: 2 3 4 0 - it will wait 2s between consecutive scans for 3
+ * times - after that it will wait 4s between consecutive scans
+ * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+ /* set to 0 if you wish for PNO to use its default telescopic timer */
+ u8 count;
+
+	/* Each value represents the amount of time that PNO will wait
+	 * between two consecutive scan procedures. For a uniform timer
+	 * that always fires at the exact same interval, a single value is
+	 * to be set. For a more complex, telescopic-like timer, multiple
+	 * values can be set; once PNO reaches the end of the array it will
+	 * continue scanning at the interval given by the last value */
+ struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
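+
+/*
+ * Example of the telescopic timer described above (an illustrative
+ * sketch; "timers" is a local struct scan_timers_type): wait 2 s
+ * between scans three times, then 4 s between scans until disabled.
+ *
+ *	timers.count = 2;
+ *	timers.values[0].value = 2;
+ *	timers.values[0].repeat = 3;
+ *	timers.values[1].value = 4;
+ *	timers.values[1].repeat = 0;
+ */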
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Description of a network that PNO has to look for */
+struct network_type_new {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* SSID broadcast type, normal, hidden or unknown */
+ enum ssid_bcast_type bcast_network_type;
+
+	/* Indicates the channels on which the network can be found;
+	 * a count of 0 means all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request - just to indicate that PNO has
+ * acknowledged the request and will start scanning */
+ u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Network that was found with the highest RSSI */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Indicates the RSSI */
+ u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* RSSI Threshold */
+ u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* 0: disable, 1:enable */
+ u8 tx_per_tracking_enable;
+
+ /* Check period, unit is sec. */
+ u8 tx_per_tracking_period;
+
+ /* (Fail TX packet)/(Total TX packet) ratio, the unit is 10%. */
+ u8 tx_per_tracking_ratio;
+
+	/* Watermark for the check: once the number of tx packets exceeds
+	 * this number, we do the check. Default is 5 */
+ u32 tx_per_tracking_watermark;
+};
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+};
+
+struct tx_per_hit_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
+#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
+#define WCN36XX_HAL_MAX_NUM_FILTERS 20
+#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+ HAL_RCV_FILTER_TYPE_INVALID,
+ HAL_RCV_FILTER_TYPE_FILTER_PKT,
+ HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+ HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+ HAL_FILTER_PROTO_TYPE_INVALID,
+ HAL_FILTER_PROTO_TYPE_MAC,
+ HAL_FILTER_PROTO_TYPE_ARP,
+ HAL_FILTER_PROTO_TYPE_IPV4,
+ HAL_FILTER_PROTO_TYPE_IPV6,
+ HAL_FILTER_PROTO_TYPE_UDP,
+ HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+ HAL_FILTER_CMP_TYPE_INVALID,
+ HAL_FILTER_CMP_TYPE_EQUAL,
+ HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+ HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+ HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+ u8 protocol_layer;
+ u8 cmp_flag;
+
+ /* Length of the data to compare */
+ u16 data_length;
+
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ /* Reserved field */
+ u8 reserved;
+
+ /* Data to compare */
+ u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+ /* Mask to be applied on the received packet data before compare */
+ u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coleasce_time;
+ u8 bss_index;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ u32 mc_addr_count;
+ u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+ u8 id;
+ u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ u32 match_count;
+ struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+ matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+ /* only valid for response message */
+ u32 status;
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+ u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Ignore DTIM */
+ u32 ignore_dtim;
+
+ /* DTIM Period */
+ u32 dtim_period;
+
+ /* Listen Interval */
+ u32 listen_interval;
+
+ /* Broadcast Multicast Filter */
+ u32 bcast_mcast_filter;
+
+ /* Beacon Early Termination */
+ u32 enable_bet;
+
+ /* Beacon Early Termination Interval */
+ u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros start */
+
+enum place_holder_in_cap_bitmap {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+ struct wcn36xx_hal_msg_header header;
+
+ u32 feat_caps[4];
+} __packed;
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+ WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+ /* rekey detected, but not handled */
+ WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+ /* MIC check error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+ /* decryption error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+ /* M1 replay detected */
+ WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+ /* missing GTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+ /* missing iGTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+ /* key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+ /* iGTK key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+ /* GTK rekey M2 response TX error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+ /* non-specific general error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+ WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+ /* magic packet match */
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+ /* host defined pattern match */
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+ /* EAP-ID frame detected */
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+ /* start of EAPOL 4-way handshake detected */
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+ /* network scan offload match */
+ WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+ /* GTK rekey status wakeup (see status) */
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+ /* BSS connection lost */
+ WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ Wake Packet which is saved at tWakeReasonParams.DataStart
+ This data is sent for any wake reasons that involve a packet-based wakeup:
+
+ WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+
+ The information is provided to the host for auditing and debug purposes
+
+*/
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* see tWakeReasonType */
+ u32 reason;
+
+ /* argument specific to the reason type */
+ u32 reason_arg;
+
+ /* length of optional data stored in this message, in case HAL
+ * truncates the data (i.e. data packets) this length will be less
+ * than the actual length */
+ u32 stored_data_len;
+
+ /* actual length of data */
+ u32 actual_data_len;
+
+ /* variable length start of data (length == storedDataLen) see
+ * specific wake type */
+ u8 data_start[1];
+
+ u32 bss_index:8;
+ u32 reserved:24;
+};
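+
+/*
+ * Example of reading the optional wake packet data above (an
+ * illustrative sketch; "ind" points at a received indication):
+ *
+ *	const u8 *data = ind->data_start;
+ *	u32 len = ind->stored_data_len;
+ *
+ * stored_data_len may be smaller than actual_data_len if HAL truncated
+ * the packet.
+ */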
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG 0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* optional flags */
+ u32 flags;
+
+ /* Key confirmation key */
+ u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+ /* key encryption key */
+ u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+ /* replay counter */
+ u64 key_replay_counter;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* last rekey status when the rekey was offloaded */
+ u32 last_rekey_status;
+
+ /* current replay counter value */
+ u64 key_replay_counter;
+
+ /* total rekey attempts */
+ u32 total_rekey_count;
+
+ /* successful GTK rekeys */
+ u32 gtk_rekey_count;
+
+ /* successful iGTK rekeys */
+ u32 igtk_rekey_count;
+
+ u8 bss_index;
+};
+
+struct dhcp_info {
+ /* Device mode for which the DHCP activity is indicated */
+ u8 device_mode;
+
+ u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/*
+ * Thermal Mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - mitigate by disabling AMPDU
+ * aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - mitigate by disabling AMPDU
+ * aggregation and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+ HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+ HAL_THERMAL_MITIGATION_MODE_0,
+ HAL_THERMAL_MITIGATION_MODE_1,
+ HAL_THERMAL_MITIGATION_MODE_2,
+ HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal Mitigation level.
+ * Note the levels are incremental, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+ HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+ HAL_THERMAL_MITIGATION_LEVEL_0,
+ HAL_THERMAL_MITIGATION_LEVEL_1,
+ HAL_THERMAL_MITIGATION_LEVEL_2,
+ HAL_THERMAL_MITIGATION_LEVEL_3,
+ HAL_THERMAL_MITIGATION_LEVEL_4,
+ HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Thermal Mitigation Operation Mode */
+ enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+ /* Thermal Mitigation Level */
+ enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Duration over which these stats were collected */
+ u32 duration;
+
+ /* Per STA Stats */
+
+ /* TX stats */
+ u32 tx_bytes_pushed;
+ u32 tx_packets_pushed;
+
+ /* RX stats */
+ u32 rx_bytes_rcvd;
+ u32 rx_packets_rcvd;
+ u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 000000000000..7839b31e4826
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+ CHAN2G(2412, 1), /* Channel 1 */
+ CHAN2G(2417, 2), /* Channel 2 */
+ CHAN2G(2422, 3), /* Channel 3 */
+ CHAN2G(2427, 4), /* Channel 4 */
+ CHAN2G(2432, 5), /* Channel 5 */
+ CHAN2G(2437, 6), /* Channel 6 */
+ CHAN2G(2442, 7), /* Channel 7 */
+ CHAN2G(2447, 8), /* Channel 8 */
+ CHAN2G(2452, 9), /* Channel 9 */
+ CHAN2G(2457, 10), /* Channel 10 */
+ CHAN2G(2462, 11), /* Channel 11 */
+ CHAN2G(2467, 12), /* Channel 12 */
+ CHAN2G(2472, 13), /* Channel 13 */
+ CHAN2G(2484, 14) /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+ CHAN5G(5180, 36),
+ CHAN5G(5200, 40),
+ CHAN5G(5220, 44),
+ CHAN5G(5240, 48),
+ CHAN5G(5260, 52),
+ CHAN5G(5280, 56),
+ CHAN5G(5300, 60),
+ CHAN5G(5320, 64),
+ CHAN5G(5500, 100),
+ CHAN5G(5520, 104),
+ CHAN5G(5540, 108),
+ CHAN5G(5560, 112),
+ CHAN5G(5580, 116),
+ CHAN5G(5600, 120),
+ CHAN5G(5620, 124),
+ CHAN5G(5640, 128),
+ CHAN5G(5660, 132),
+ CHAN5G(5700, 140),
+ CHAN5G(5745, 149),
+ CHAN5G(5765, 153),
+ CHAN5G(5785, 157),
+ CHAN5G(5805, 161),
+ CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (_hw_rate) \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+ RATE(10, HW_RATE_INDEX_1MBPS, 0),
+ RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+ .channels = wcn_2ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
+ .bitrates = wcn_2ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+ .channels = wcn_5ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
+ .bitrates = wcn_5ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
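+/* In STA mode the firmware addresses the peer via the BSS STA index, while
+ * in AP/IBSS mode each connected station gets its own index.
+ */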
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+ struct wcn36xx_sta *sta_priv)
+{
+ return NL80211_IFTYPE_STATION == vif->type ?
+ sta_priv->bss_sta_index :
+ sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+ int ret;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+ /* SMD initialization */
+ ret = wcn36xx_smd_open(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to open smd channel: %d\n", ret);
+ goto out_err;
+ }
+
+ /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+ ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+ goto out_smd_close;
+ }
+
+ ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+ goto out_free_dxe_pool;
+ }
+
+ wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ wcn36xx_err("Failed to allocate smd buf\n");
+ ret = -ENOMEM;
+ goto out_free_dxe_ctl;
+ }
+
+ ret = wcn36xx_smd_load_nv(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to push NV to chip\n");
+ goto out_free_smd_buf;
+ }
+
+ ret = wcn36xx_smd_start(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to start chip\n");
+ goto out_free_smd_buf;
+ }
+
+ /* DMA channel initialization */
+ ret = wcn36xx_dxe_init(wcn);
+ if (ret) {
+ wcn36xx_err("DXE init failed\n");
+ goto out_smd_stop;
+ }
+
+ wcn36xx_debugfs_init(wcn);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ }
+ INIT_LIST_HEAD(&wcn->vif_list);
+ return 0;
+
+out_smd_stop:
+ wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+ kfree(wcn->hal_buf);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+ wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+ wcn36xx_smd_close(wcn);
+out_err:
+ return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+ wcn36xx_debugfs_exit(wcn);
+ wcn36xx_smd_stop(wcn);
+ wcn36xx_dxe_deinit(wcn);
+ wcn36xx_smd_close(wcn);
+
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_dxe_free_ctl_blks(wcn);
+
+ kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ int ch = WCN36XX_HW_CHANNEL(wcn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+ ch);
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+ }
+
+ return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total, u64 multicast)
+{
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+ *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ if (control->sta)
+ sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+ if (wcn36xx_start_tx(wcn, sta_priv, skb))
+ ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ int ret = 0;
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+ cmd, key_conf->cipher, key_conf->keyidx,
+ key_conf->keylen, key_conf->flags);
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+ key_conf->key,
+ key_conf->keylen);
+
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+ break;
+ default:
+ wcn36xx_err("Unsupported key type 0x%x\n",
+ key_conf->cipher);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+ /*
+ * The supplicant sends the key in the wrong order:
+ * Temporal Key (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes),
+ * but the HW expects it in the order described in the
+ * IEEE 802.11 spec (see chapter 11.7):
+ * Temporal Key (16 bytes) - RX MIC (8 bytes) - TX MIC (8 bytes)
+ */
+ memcpy(key, key_conf->key, 16);
+ memcpy(key + 16, key_conf->key + 24, 8);
+ memcpy(key + 24, key_conf->key + 16, 8);
+ } else {
+ memcpy(key, key_conf->key, key_conf->keylen);
+ }
+
+ if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+ sta_priv->is_data_encrypted = true;
+ /* Reconfigure bss with encrypt_type */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_smd_config_bss(wcn,
+ vif,
+ sta,
+ sta->addr,
+ true);
+
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ } else {
+ wcn36xx_smd_set_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key);
+ if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+ (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
+ }
+ break;
+ case DISABLE_KEY:
+ if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx);
+ } else {
+ sta_priv->is_data_encrypted = false;
+ /* do not remove key if disassociated */
+ if (sta_priv->aid)
+ wcn36xx_smd_remove_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ get_sta_index(vif, sta_priv));
+ }
+ break;
+ default:
+ wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+ wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_end_scan(wcn);
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
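+/* Translate mac80211's supported-rates bitmap and HT MCS mask for this
+ * station into the firmware's supported_rates structure.
+ */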
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, size;
+ u16 *rates_table;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ u32 rates = sta->supp_rates[band];
+
+ memset(&sta_priv->supported_rates, 0,
+ sizeof(sta_priv->supported_rates));
+ sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+ rates_table = sta_priv->supported_rates.dsss_rates;
+ if (band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_2ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+ }
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+ rates_table = sta_priv->supported_rates.ofdm_rates;
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_5ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
+ memcpy(sta_priv->supported_rates.supported_mcs_set,
+ sta->ht_cap.mcs.rx_mask,
+ sizeof(sta->ht_cap.mcs.rx_mask));
+ }
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+ HW_RATE_INDEX_6MBPS,
+ HW_RATE_INDEX_9MBPS,
+ HW_RATE_INDEX_12MBPS,
+ HW_RATE_INDEX_18MBPS,
+ HW_RATE_INDEX_24MBPS,
+ HW_RATE_INDEX_36MBPS,
+ HW_RATE_INDEX_48MBPS,
+ HW_RATE_INDEX_54MBPS
+ };
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+ HW_RATE_INDEX_1MBPS,
+ HW_RATE_INDEX_2MBPS,
+ HW_RATE_INDEX_5_5MBPS,
+ HW_RATE_INDEX_11MBPS
+ };
+
+ rates->op_rate_mode = STA_11n;
+ memcpy(rates->dsss_rates, dsss_rates,
+ sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+ memcpy(rates->ofdm_rates, ofdm_rates,
+ sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+ rates->supported_mcs_set[0] = 0xFF;
+}
+
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct sk_buff *skb = NULL;
+ u16 tim_off, tim_len;
+ enum wcn36xx_hal_link_state link_state;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ vif, changed);
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed dtim period %d\n",
+ bss_conf->dtim_period);
+
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss PS set %d\n",
+ bss_conf->ps);
+ if (bss_conf->ps) {
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+ bss_conf->bssid);
+
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ vif_priv->is_joining = true;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_join(wcn, bss_conf->bssid,
+ vif->addr, WCN36XX_HW_CHANNEL(wcn));
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ bss_conf->bssid, false);
+ } else {
+ vif_priv->is_joining = false;
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_SSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed ssid\n");
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+ bss_conf->ssid, bss_conf->ssid_len);
+
+ vif_priv->ssid.length = bss_conf->ssid_len;
+ memcpy(&vif_priv->ssid.ssid,
+ bss_conf->ssid,
+ bss_conf->ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ vif_priv->is_joining = false;
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta;
+ struct wcn36xx_sta *sta_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac assoc bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ wcn36xx_err("sta %pM is not found\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ goto out;
+ }
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+ wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE);
+ wcn36xx_smd_config_bss(wcn, vif, sta,
+ bss_conf->bssid,
+ true);
+ sta_priv->aid = bss_conf->aid;
+ /*
+ * config_sta must be called from here because this is
+ * the place where the AID is available.
+ */
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ rcu_read_unlock();
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "disassociated bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+ wcn36xx_smd_set_link_st(wcn,
+ bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+ skb = ieee80211_proberesp_get(hw, vif);
+ if (!skb) {
+ wcn36xx_err("failed to alloc probereq skb\n");
+ goto out;
+ }
+
+ wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+ dev_kfree_skb(skb);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed beacon enabled %d\n",
+ bss_conf->enable_beacon);
+
+ if (bss_conf->enable_beacon) {
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ vif->addr, false);
+ skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+ &tim_len);
+ if (!skb) {
+ wcn36xx_err("failed to alloc beacon skb\n");
+ goto out;
+ }
+ wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+ dev_kfree_skb(skb);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+ else
+ link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ link_state);
+ } else {
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+out:
+ return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+ return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+ list_del(&vif_priv->list);
+ wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+ vif, vif->type);
+
+ if (!(NL80211_IFTYPE_STATION == vif->type ||
+ NL80211_IFTYPE_AP == vif->type ||
+ NL80211_IFTYPE_ADHOC == vif->type ||
+ NL80211_IFTYPE_MESH_POINT == vif->type)) {
+ wcn36xx_warn("Unsupported interface type requested: %d\n",
+ vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ list_add(&vif_priv->list, &wcn->vif_list);
+ wcn36xx_smd_add_sta_self(wcn, vif);
+
+ return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ vif, sta->addr);
+
+ vif_priv->sta = sta_priv;
+ sta_priv->vif = vif_priv;
+ /*
+ * In STA mode the HW will be configured on BSS_CHANGED_ASSOC because
+ * at this stage the AID is not yet available.
+ */
+ if (NL80211_IFTYPE_STATION != vif->type) {
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+ sta_priv->aid = sta->aid;
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
+ return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ vif, sta->addr, sta_priv->sta_index);
+
+ wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ vif_priv->sta = NULL;
+ sta_priv->vif = NULL;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, true);
+ return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, false);
+ return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ action, tid);
+
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ sta_priv->tid = tid;
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ wcn36xx_err("Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+ .start = wcn36xx_start,
+ .stop = wcn36xx_stop,
+ .add_interface = wcn36xx_add_interface,
+ .remove_interface = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+ .suspend = wcn36xx_suspend,
+ .resume = wcn36xx_resume,
+#endif
+ .config = wcn36xx_config,
+ .configure_filter = wcn36xx_configure_filter,
+ .tx = wcn36xx_tx,
+ .set_key = wcn36xx_set_key,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .bss_info_changed = wcn36xx_bss_info_changed,
+ .set_rts_threshold = wcn36xx_set_rts_threshold,
+ .sta_add = wcn36xx_sta_add,
+ .sta_remove = wcn36xx_sta_remove,
+ .ampdu_action = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+ int ret = 0;
+
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+ wcn->hw->wiphy->cipher_suites = cipher_suites;
+ wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+ wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+ wcn->hw->max_listen_interval = 200;
+
+ wcn->hw->queues = 4;
+
+ SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+ wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+ wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+ return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ /* Set TX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlantx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get tx_irq\n");
+ return -ENOENT;
+ }
+ wcn->tx_irq = res->start;
+
+ /* Set RX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlanrx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get rx_irq\n");
+ return -ENOENT;
+ }
+ wcn->rx_irq = res->start;
+
+ /* Map the memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wcnss_mmio");
+ if (!res) {
+ wcn36xx_err("failed to get mmio\n");
+ return -ENOENT;
+ }
+ wcn->mmio = ioremap(res->start, resource_size(res));
+ if (!wcn->mmio) {
+ wcn36xx_err("failed to map io memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct wcn36xx *wcn;
+ int ret;
+ u8 addr[ETH_ALEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+ hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+ if (!hw) {
+ wcn36xx_err("failed to alloc hw\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ platform_set_drvdata(pdev, hw);
+ wcn = hw->priv;
+ wcn->hw = hw;
+ wcn->dev = &pdev->dev;
+ wcn->ctrl_ops = pdev->dev.platform_data;
+
+ mutex_init(&wcn->hal_mutex);
+
+ if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ wcn36xx_info("mac address: %pM\n", addr);
+ SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+ }
+
+ ret = wcn36xx_platform_get_resources(wcn, pdev);
+ if (ret)
+ goto out_wq;
+
+ wcn36xx_init_ieee80211(wcn);
+ ret = ieee80211_register_hw(wcn->hw);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(wcn->mmio);
+out_wq:
+ ieee80211_free_hw(hw);
+out_err:
+ return ret;
+}
+
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+ mutex_destroy(&wcn->hal_mutex);
+
+ ieee80211_unregister_hw(hw);
+ iounmap(wcn->mmio);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+ {
+ .name = "wcn36xx",
+ .driver_data = 0
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+ .probe = wcn36xx_probe,
+ .remove = wcn36xx_remove,
+ .driver = {
+ .name = "wcn36xx",
+ .owner = THIS_MODULE,
+ },
+ .id_table = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+ platform_driver_register(&wcn36xx_driver);
+ return 0;
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 000000000000..28b515c81b0e
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ int ret = 0;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ /* TODO: Make sure the TX chain is clean */
+ ret = wcn36xx_smd_enter_bmps(wcn, vif);
+ if (!ret) {
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+ vif_priv->pw_state = WCN36XX_BMPS;
+ } else {
+ /*
+ * One reason the HW may refuse to enter BMPS is that the
+ * driver tries to enter BMPS before the first beacon has been
+ * received, right after authentication completes.
+ */
+ wcn36xx_err("Can not enter BMPS!\n");
+ }
+ return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (WCN36XX_BMPS != vif_priv->pw_state) {
+ wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+ return -EINVAL;
+ }
+ wcn36xx_smd_exit_bmps(wcn, vif);
+ vif_priv->pw_state = WCN36XX_FULL_POWER;
+ return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+ return wcn36xx_smd_keep_alive_req(wcn, vif,
+ WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 000000000000..f72ed68b5a07
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+ WCN36XX_FULL_POWER,
+ WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 000000000000..f8c3a10510c2
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
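+/* Append one u32 configuration TLV (entry header followed by its value) to
+ * wcn->hal_buf at offset *len and advance *len past the new entry.
+ */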
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+ struct wcn36xx_hal_cfg *entry;
+ u32 *val;
+
+ if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+ wcn36xx_err("Not enough room for TLV entry\n");
+ return -ENOMEM;
+ }
+
+ entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+ entry->id = id;
+ entry->len = sizeof(u32);
+ entry->pad_bytes = 0;
+ entry->reserve = 0;
+
+ val = (u32 *) (entry + 1);
+ *val = value;
+
+ *len += sizeof(*entry) + sizeof(u32);
+
+ return 0;
+}
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+ else if (sta && sta->ht_cap.ht_supported)
+ bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+ else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+ else
+ bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+ return caps & flag ? 1 : 0;
+}
+
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (sta && sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ bss_params->ht = sta->ht_cap.ht_supported;
+ bss_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ bss_params->lsig_tx_op_protection_full_support =
+ is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+ bss_params->lln_non_gf_coexist =
+ !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+ bss_params->dual_cts_protection = 0;
+ /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+ bss_params->ht20_coexist = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ sta_params->ht_capable = sta->ht_cap.ht_supported;
+ sta_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ sta_params->lsig_txop_protection = is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_amsdu_size = is_cap_supported(caps,
+ IEEE80211_HT_CAP_MAX_AMSDU);
+ sta_params->sgi_20Mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_20);
+ sta_params->sgi_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_40);
+ sta_params->green_field_capable = is_cap_supported(caps,
+ IEEE80211_HT_CAP_GRN_FLD);
+ sta_params->delayed_ba_support = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DELAY_BA);
+ sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ }
+}
+
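+/* Fill the station parameters for a HAL config request; when no
+ * ieee80211_sta is available yet, fall back to the default rate set.
+ */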
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *priv_sta = NULL;
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ sta_params->type = 1;
+ sta_params->sta_index = 0xFF;
+ } else {
+ sta_params->type = 0;
+ sta_params->sta_index = 1;
+ }
+
+ sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ /*
+ * In STA mode ieee80211_sta holds the BSSID and ieee80211_vif
+ * holds our MAC address. In AP mode we are the BSS, so the vif
+ * holds the BSSID and ieee80211_sta holds the peer's MAC.
+ */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+ sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->short_preamble_supported =
+ !(WCN36XX_FLAGS(wcn) &
+ IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+ sta_params->rifs_mode = 0;
+ sta_params->rmf = 0;
+ sta_params->action = 0;
+ sta_params->uapsd = 0;
+ sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+ sta_params->max_ampdu_duration = 0;
+ sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->p2p = 0;
+
+ if (sta) {
+ priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+ sta_params->wmm_enabled = sta->wme;
+ sta_params->max_sp_len = sta->max_sp;
+ sta_params->aid = priv_sta->aid;
+ wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+ memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+ sizeof(priv_sta->supported_rates));
+ } else {
+ wcn36xx_set_default_rates(&sta_params->supported_rates);
+ }
+}
+
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+ int ret = 0;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+ init_completion(&wcn->hal_rsp_compl);
+ ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ if (ret) {
+ wcn36xx_err("HAL TX failed\n");
+ goto out;
+ }
+ if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+ msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+ wcn36xx_err("Timeout while waiting SMD response\n");
+ ret = -ETIME;
+ goto out;
+ }
+out:
+ return ret;
+}
+
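+/* Helpers for building HAL messages: INIT_HAL_MSG() zeroes the message body
+ * and fills in the common header, PREPARE_HAL_BUF() copies the body into the
+ * shared wcn->hal_buf (callers hold hal_mutex).
+ */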
+#define INIT_HAL_MSG(msg_body, type) \
+ do { \
+ memset(&msg_body, 0, sizeof(msg_body)); \
+ msg_body.header.msg_type = type; \
+ msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.len = sizeof(msg_body); \
+ } while (0) \
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+ do { \
+ memset(send_buf, 0, msg_body.header.len); \
+ memcpy(send_buf, &msg_body, sizeof(msg_body)); \
+ } while (0) \
+
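+/* Generic status check for HAL responses that carry only a status word
+ * immediately after the message header.
+ */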
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp *rsp;
+
+ if (len < sizeof(struct wcn36xx_hal_msg_header) +
+ sizeof(struct wcn36xx_fw_msg_status_rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_fw_msg_status_rsp *)
+ (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+ const struct firmware *nv;
+ struct nv_data *nv_d;
+ struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+ int fw_bytes_left;
+ int ret;
+ u16 fm_offset = 0;
+
+ ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out_free_nv;
+ }
+
+ nv_d = (struct nv_data *)nv->data;
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+ msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+ msg_body.frag_number = 0;
+ /* hal_buf must be protected with mutex */
+ mutex_lock(&wcn->hal_mutex);
+
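+ /* Stream the NV blob in WCN36XX_NV_FRAGMENT_SIZE chunks; the final
+ * chunk carries the remainder and is marked as the last fragment.
+ */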
+ do {
+ fw_bytes_left = nv->size - fm_offset - 4;
+ if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+ msg_body.last_fragment = 0;
+ msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+ } else {
+ msg_body.last_fragment = 1;
+ msg_body.nv_img_buffer_size = fw_bytes_left;
+
+ /* Do not forget to update the overall message length */
+ msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+ }
+
+ /* Add load NV request message header */
+ memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+ /* Add NV body itself */
+ memcpy(wcn->hal_buf + sizeof(msg_body),
+ &nv_d->table + fm_offset,
+ msg_body.nv_img_buffer_size);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret)
+ goto out_unlock;
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_load_nv response failed err=%d\n",
+ ret);
+ goto out_unlock;
+ }
+ msg_body.frag_number++;
+ fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+ } while (msg_body.last_fragment != 1);
+
+out_unlock:
+ mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+ release_firmware(nv);
+
+ return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+ return -EIO;
+
+ memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+ memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+
+ /* null terminate the strings, just in case */
+ wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+ wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+ wcn->fw_revision = rsp->start_rsp_params.version.revision;
+ wcn->fw_version = rsp->start_rsp_params.version.version;
+ wcn->fw_minor = rsp->start_rsp_params.version.minor;
+ wcn->fw_major = rsp->start_rsp_params.version.major;
+
+ wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+ wcn->wlan_version, wcn->crm_version);
+
+ wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+ wcn->fw_major, wcn->fw_minor,
+ wcn->fw_version, wcn->fw_revision,
+ rsp->start_rsp_params.stations,
+ rsp->start_rsp_params.bssids);
+
+ return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_start_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+ msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+ msg_body.params.len = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+ msg_body.params.type);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+ msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_init_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_init_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_start_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_end_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_end_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_finish_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+ msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_finish_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+ int ret = 0;
+
+ ret = wcn36xx_smd_rsp_status_check(buf, len);
+ if (ret)
+ return ret;
+ rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+ rsp->channel_number, rsp->status);
+ return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch)
+{
+ struct wcn36xx_hal_switch_channel_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+ msg_body.channel_number = (u8)ch;
+ msg_body.tx_mgmt_power = 0xbf;
+ msg_body.max_tx_power = 0xbf;
+ memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_switch_channel failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+ rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+ /* Remove the PNO version bit */
+ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+ wcn36xx_warn("error response from update scan\n");
+ return rsp->status;
+ }
+
+ return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_update_scan_params_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+ msg_body.dot11d_enabled = 0;
+ msg_body.dot11d_resolved = 0;
+ msg_body.channel_count = 26;
+ msg_body.active_min_ch_time = 60;
+ msg_body.active_max_ch_time = 120;
+ msg_body.passive_min_ch_time = 60;
+ msg_body.passive_max_ch_time = 110;
+ msg_body.state = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update scan params channel_count %d\n",
+ msg_body.channel_count);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_scan_params failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal add sta self failure: %d\n",
+ rsp->status);
+ return rsp->status;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+ rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+ priv_vif->self_sta_index = rsp->self_sta_index;
+ priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+ return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_sta_self_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self self_addr %pM status %d\n",
+ msg_body.self_addr, msg_body.status);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+ struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_delete_sta_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+ msg_body.sta_index = sta_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal delete sta sta_index %d\n",
+ msg_body.sta_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_join_rsp_msg *rsp;
+
+ if (wcn36xx_smd_rsp_status_check(buf, len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal rsp join status %d tx_mgmt_power %d\n",
+ rsp->status, rsp->tx_mgmt_power);
+
+ return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+ struct wcn36xx_hal_join_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+ msg_body.channel = ch;
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+ else
+ msg_body.secondary_channel_offset =
+ PHY_SINGLE_CHANNEL_CENTERED;
+
+ msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+ msg_body.max_tx_power = 0xbf;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+ msg_body.bssid, msg_body.self_sta_mac_addr,
+ msg_body.channel, msg_body.link_state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_join failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_join response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state)
+{
+ struct wcn36xx_hal_set_link_state_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+ msg_body.state = state;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+ msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_link_st failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
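+/*
+ * Copy the station parameters that are common to both layouts into the
+ * more compact v1 structure used with firmware versions other than
+ * 1.2.2.24 (see the version checks in config_sta/config_bss below).
+ */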
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_params *orig,
+ struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+ /* convert orig to v1 format */
+ memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+ memcpy(&v1->mac, orig->mac, ETH_ALEN);
+ v1->aid = orig->aid;
+ v1->type = orig->type;
+ v1->listen_interval = orig->listen_interval;
+ v1->ht_capable = orig->ht_capable;
+
+ v1->max_ampdu_size = orig->max_ampdu_size;
+ v1->max_ampdu_density = orig->max_ampdu_density;
+ v1->sgi_40mhz = orig->sgi_40mhz;
+ v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+ memcpy(&v1->supported_rates, &orig->supported_rates,
+ sizeof(orig->supported_rates));
+ v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+ struct config_sta_rsp_params *params;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+ params = &rsp->params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config sta response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ sta_priv->sta_index = params->sta_index;
+ sta_priv->dpu_desc_index = params->dpu_index;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ params->status, params->sta_index, params->bssid_index,
+ params->p2p);
+
+ return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+ struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+ &msg_body.sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta->action, sta->sta_index, sta->bssid_index,
+ sta->bssid, sta->type, sta->mac, sta->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx_hal_config_sta_req_msg msg;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ sta_params = &msg.sta_params;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
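+ /* Firmware 1.2.2.24 takes the full config_sta layout; any other
+ * version gets the v1 message instead. */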
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_sta_rsp(wcn,
+ sta,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ /* convert orig to v1 */
+ memcpy(&msg_body.bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+ memcpy(&msg_body.bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+ msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+ msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+ msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+ msg_body.bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+ msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+ msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+ msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+ msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+ msg_body.bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+ msg_body.bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+ msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+ msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+ msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+ msg_body.bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+ msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+ msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+ msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+ memcpy(&msg_body.bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+ msg_body.bss_params.action = orig->bss_params.action;
+ msg_body.bss_params.rateset = orig->bss_params.rateset;
+ msg_body.bss_params.ht = orig->bss_params.ht;
+ msg_body.bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+ msg_body.bss_params.rmf = orig->bss_params.rmf;
+ msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+ msg_body.bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+ msg_body.bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+ msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+ msg_body.bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+ msg_body.bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+ memcpy(&msg_body.bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+ memcpy(&msg_body.bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+ memcpy(&msg_body.bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+ memcpy(&msg_body.bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+ memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+ &orig->bss_params.ext_set_sta_key_param,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+ msg_body.bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+ msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+ msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+ &msg_body.bss_params.sta);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
+
+ priv_vif->bss_index = params->bss_index;
+
+ if (priv_vif->sta) {
+ priv_vif->sta->bss_sta_index = params->bss_sta_index;
+ priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ }
+
+ priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg.bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+
+ memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+ memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+ /* AP */
+ bss->oper_mode = 0;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ } else {
+ wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+ else
+ bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+ bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+ bss->lla_coexist = 0;
+ bss->llb_coexist = 0;
+ bss->llg_coexist = 0;
+ bss->rifs_mode = 0;
+ bss->beacon_interval = vif->bss_conf.beacon_int;
+ bss->dtim_period = vif_priv->dtim_period;
+
+ wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+ bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ bss->reserved = 0;
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ /* wcn->ssid is only valid in AP and IBSS mode */
+ bss->ssid.length = vif_priv->ssid.length;
+ memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+ bss->obss_prot_enabled = 0;
+ bss->rmf = 0;
+ bss->max_probe_resp_retry_limit = 0;
+ bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+ bss->proxy_probe_resp = 0;
+ bss->edca_params_valid = 0;
+
+ /* FIXME: set acbe, acbk, acvi and acvo */
+
+ bss->ext_set_sta_key_param_valid = 0;
+
+ /* FIXME: set ext_set_sta_key_param */
+
+ bss->spectrum_mgt_enable = 0;
+ bss->tx_mgmt_power = 0;
+ bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+ bss->action = update;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta_params->bssid, sta_params->action,
+ sta_params->sta_index, sta_params->bssid_index,
+ sta_params->aid, sta_params->type,
+ sta_params->mac);
+
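+ /* As in wcn36xx_smd_config_sta(), only firmware 1.2.2.24 takes the
+ * full layout; other versions use the v1 message. */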
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_bss_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_delete_bss_req_msg msg_body;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+ msg_body.bss_index = priv_vif->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off)
+{
+ struct wcn36xx_hal_send_beacon_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+ /* TODO: find out why this extra length is needed */
+ msg_body.beacon_length = skb_beacon->len + 6;
+
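+ /* The beacon template in the message is prefixed with the frame
+ * length as a u32, so the frame itself is copied in at offset 4. */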
+ if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+ memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+ memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+ skb_beacon->len);
+ } else {
+ wcn36xx_err("Beacon is too big: beacon size=%d\n",
+ msg_body.beacon_length);
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+ /* TODO: find out why this extra offset is needed */
+ msg_body.tim_ie_offset = tim_off+4;
+ msg_body.p2p_ie_offset = p2p_off;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal send beacon beacon_length %d\n",
+ msg_body.beacon_length);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_send_beacon failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wcn36xx_hal_send_probe_resp_req_msg msg;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+ if (skb->len > BEACON_TEMPLATE_SIZE) {
+ wcn36xx_warn("probe response template is too big: %d\n",
+ skb->len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ msg.probe_resp_template_len = skb->len;
+ memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+ memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update probe rsp len %d bssid %pM\n",
+ msg.probe_resp_template_len, msg.bssid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+ msg_body.set_sta_key_params.sta_index = sta_index;
+ msg_body.set_sta_key_params.enc_type = enc_type;
+
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ msg_body.set_sta_key_params.single_tid_rc = 1;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key)
+{
+ struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.num_keys = 1;
+ msg_body.keys[0].id = keyidx;
+ msg_body.keys[0].unicast = 0;
+ msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+ msg_body.keys[0].pae_role = 0;
+ msg_body.keys[0].length = keylen;
+ memcpy(msg_body.keys[0].key, key, keylen);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+ msg_body.sta_idx = sta_index;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx)
+{
+ struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.tbtt = vif->bss_conf.sync_tsf;
+ msg_body.dtim_period = vif_priv->dtim_period;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+ struct wcn36xx_hal_set_power_params_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+ /*
+ * When the host is down, ignore every second DTIM
+ */
+ if (ignore_dtim) {
+ msg_body.ignore_dtim = 1;
+ msg_body.dtim_period = 2;
+ }
+ msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_power_params failed\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+/* Notice: This function should be called after association, otherwise
+ * the request is invalid.
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type)
+{
+ struct wcn36xx_hal_keep_alive_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+ if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+ /* TODO: the ARP response keep-alive type is not handled yet */
+ } else {
+ wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5)
+{
+ struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+ msg_body.arg1 = arg1;
+ msg_body.arg2 = arg2;
+ msg_body.arg3 = arg3;
+ msg_body.arg4 = arg4;
+ msg_body.arg5 = arg5;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_dump_cmd failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
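+/*
+ * The capability bitmap is an array of four u32 words (128 bits); a
+ * capability index selects word cap / 32 and bit cap % 32.
+ */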
+static inline void set_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+ int ret = 0;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+ return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+ enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_feat_caps_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+ set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+ msg_body.sta_index = sta_index;
+ memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+ msg_body.dialog_token = 0x10;
+ msg_body.tid = tid;
+
+ /* Immediate BA because Delayed BA is not supported */
+ msg_body.policy = 1;
+ msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+ msg_body.timeout = 0;
+ if (ssn)
+ msg_body.ssn = *ssn;
+ msg_body.direction = direction;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba_session failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_add_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+ struct wcn36xx_hal_del_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+ msg_body.sta_index = sta_index;
+ msg_body.tid = tid;
+ msg_body.direction = 0;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_del_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+ struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.candidate_cnt = 1;
+ msg_body.header.len += sizeof(*candidate);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
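+ /* The single BA candidate is appended right after the fixed part of
+ * the request in the HAL buffer; header.len above already covers it. */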
+ candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+ (wcn->hal_buf + sizeof(msg_body));
+ candidate->sta_index = sta_index;
+ candidate->tid_bitmap = 1;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_trigger_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Bad TX complete indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+ return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ /* Old FW does not have bss index */
+ if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ tmp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ }
+ return 0;
+ }
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted missed beacon indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->bss_index == rsp->bss_index) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ rsp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+ return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_sta *sta = NULL;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete sta indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (sta && (tmp->sta->sta_index == rsp->sta_id)) {
+ sta = container_of((void *)tmp->sta,
+ struct ieee80211_sta,
+ drv_priv);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d\n",
+ rsp->addr2,
+ rsp->sta_id);
+ ieee80211_report_low_ack(sta, 0);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("STA with addr %pM and index %d not found\n",
+ rsp->addr2,
+ rsp->sta_id);
+ return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+ struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+ size_t len;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
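+ /* The configuration value is appended as a TLV after the fixed
+ * header; both the message and payload lengths are updated to
+ * include it. */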
+ body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+ len = msg_body.header.len;
+
+ put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+ body->header.len = len;
+ body->len = len - sizeof(*body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_cfg failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct wcn36xx_hal_ind_msg *msg_ind;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
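+ /*
+ * Solicited responses are copied into the HAL buffer and complete the
+ * caller waiting in wcn36xx_smd_send_and_wait(); asynchronous
+ * indications are queued for the indication work queue.
+ */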
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_START_RSP:
+ case WCN36XX_HAL_CONFIG_STA_RSP:
+ case WCN36XX_HAL_CONFIG_BSS_RSP:
+ case WCN36XX_HAL_ADD_STA_SELF_RSP:
+ case WCN36XX_HAL_STOP_RSP:
+ case WCN36XX_HAL_DEL_STA_SELF_RSP:
+ case WCN36XX_HAL_DELETE_STA_RSP:
+ case WCN36XX_HAL_INIT_SCAN_RSP:
+ case WCN36XX_HAL_START_SCAN_RSP:
+ case WCN36XX_HAL_END_SCAN_RSP:
+ case WCN36XX_HAL_FINISH_SCAN_RSP:
+ case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+ case WCN36XX_HAL_DELETE_BSS_RSP:
+ case WCN36XX_HAL_SEND_BEACON_RSP:
+ case WCN36XX_HAL_SET_LINK_ST_RSP:
+ case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+ case WCN36XX_HAL_SET_BSSKEY_RSP:
+ case WCN36XX_HAL_SET_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_BSSKEY_RSP:
+ case WCN36XX_HAL_ENTER_BMPS_RSP:
+ case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+ case WCN36XX_HAL_EXIT_BMPS_RSP:
+ case WCN36XX_HAL_KEEP_ALIVE_RSP:
+ case WCN36XX_HAL_DUMP_COMMAND_RSP:
+ case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+ case WCN36XX_HAL_ADD_BA_RSP:
+ case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_TRIGGER_BA_RSP:
+ case WCN36XX_HAL_UPDATE_CFG_RSP:
+ case WCN36XX_HAL_JOIN_RSP:
+ case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+ case WCN36XX_HAL_CH_SWITCH_RSP:
+ case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ memcpy(wcn->hal_buf, buf, len);
+ wcn->hal_rsp_len = len;
+ complete(&wcn->hal_rsp_compl);
+ break;
+
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ mutex_lock(&wcn->hal_ind_mutex);
+ msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ memcpy(msg_ind->msg, buf, len);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ mutex_unlock(&wcn->hal_ind_mutex);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+}
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, hal_ind_work);
+ struct wcn36xx_hal_msg_header *msg_header;
+ struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+ mutex_lock(&wcn->hal_ind_mutex);
+
+ hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+ struct wcn36xx_hal_ind_msg,
+ list);
+
+ msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ wcn36xx_smd_tx_compl_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ wcn36xx_smd_missed_beacon_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ wcn36xx_smd_delete_sta_context_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+ list_del(wcn->hal_ind_queue.next);
+ kfree(hal_ind_msg->msg);
+ kfree(hal_ind_msg);
+ mutex_unlock(&wcn->hal_ind_mutex);
+}
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+ int ret = 0;
+ wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+ if (!wcn->hal_ind_wq) {
+ wcn36xx_err("failed to allocate wq\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+ INIT_LIST_HEAD(&wcn->hal_ind_queue);
+ mutex_init(&wcn->hal_ind_mutex);
+
+ ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+ if (ret) {
+ wcn36xx_err("failed to open control channel\n");
+ goto free_wq;
+ }
+
+ return ret;
+
+free_wq:
+ destroy_workqueue(wcn->hal_ind_wq);
+out:
+ return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+ wcn->ctrl_ops->close();
+ destroy_workqueue(wcn->hal_ind_wq);
+ mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 000000000000..e7c39019c6f1
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE 3072
+
+#define WCN36XX_HAL_BUF_SIZE 4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
+
+enum wcn36xx_fw_msg_result {
+ WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
+ WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
+
+ WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+ struct list_head list;
+ u8 *msg;
+ size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 000000000000..b2b60e30caaf
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
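+/* The top byte of phy_stat0 carries the RSSI, reported as an offset from 100. */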
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+ return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status status;
+ struct ieee80211_hdr *hdr;
+ struct wcn36xx_rx_bd *bd;
+ u16 fc, sn;
+
+ /*
+ * All fields must be 0, otherwise it can lead to
+ * unexpected consequences.
+ */
+ memset(&status, 0, sizeof(status));
+
+ bd = (struct wcn36xx_rx_bd *)skb->data;
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+ "BD <<< ", (char *)bd,
+ sizeof(struct wcn36xx_rx_bd));
+
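+ /* Strip the hardware buffer descriptor so skb->data points at the
+ * 802.11 header. */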
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
+
+ status.mactime = 10;
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ status.signal = -get_rssi0(bd);
+ status.antenna = 1;
+ status.rate_idx = 1;
+ status.flag = 0;
+ status.rx_flags = 0;
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
+
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
+ status.flag, status.vendor_radiotap_len);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ }
+
+ ieee80211_rx_irqsafe(wcn->hw, skb);
+
+ return 0;
+}
+
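+/*
+ * Fill in the PDU portion of a TX BD: the 802.11 header immediately
+ * follows the BD and the payload follows the header.
+ */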
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+ u32 mpdu_header_len,
+ u32 len,
+ u16 tid)
+{
+ bd->pdu.mpdu_header_len = mpdu_header_len;
+ bd->pdu.mpdu_header_off = sizeof(*bd);
+ bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+ bd->pdu.mpdu_header_off;
+ bd->pdu.mpdu_len = len;
+ bd->pdu.tid = tid;
+}
+
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+ u8 *addr)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+ return vif_priv;
+ }
+ wcn36xx_warn("vif %pM not found\n", addr);
+ return NULL;
+}
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct wcn36xx_sta *sta_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *__vif_priv = NULL;
+ bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+ /*
+ * For non-unicast frames mac80211 will not set the sta pointer, so use
+ * self_sta_index instead.
+ */
+ if (sta_priv) {
+ __vif_priv = sta_priv->vif;
+ vif = container_of((void *)__vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bd->sta_index = sta_priv->bss_sta_index;
+ bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bd->sta_index = sta_priv->sta_index;
+ bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+ }
+ } else {
+ __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ }
+
+ bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ (sta_priv && !sta_priv->is_data_encrypted))
+ bd->dpu_ne = 1;
+
+ if (bcast) {
+ bd->ub = 1;
+ bd->ack_policy = 1;
+ }
+ *vif_priv = __vif_priv;
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct ieee80211_hdr *hdr,
+ bool bcast)
+{
+ struct wcn36xx_vif *__vif_priv =
+ get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_ne = 1;
+
+ /* default rate for unicast */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ WCN36XX_BD_RATE_CTRL :
+ WCN36XX_BD_RATE_MGMT;
+ else if (ieee80211_is_ctl(hdr->frame_control))
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ else
+ wcn36xx_warn("frame control type unknown\n");
+
+ /*
+ * While joining, trick the hardware into treating the probe request
+ * as unicast even if the address is broadcast.
+ */
+ if (__vif_priv->is_joining &&
+ ieee80211_is_probe_req(hdr->frame_control))
+ bcast = false;
+
+ if (bcast) {
+ /* broadcast */
+ bd->ub = 1;
+ /* No ACK needed for non-unicast frames */
+ bd->ack_policy = 1;
+ bd->queue_id = WCN36XX_TX_B_WQ_ID;
+ } else
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ *vif_priv = __vif_priv;
+}
+
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ bool is_low = ieee80211_is_data(hdr->frame_control);
+ bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+ if (!bd) {
+ /*
+ * TX DXE descriptors are used in pairs: one for the BD and one for
+ * the actual frame. The BD DXE has a preallocated buffer while the
+ * skb one does not. If this isn't true something is really weird.
+ * TODO: Recover from this situation
+ */
+
+ wcn36xx_err("bd address may not be NULL for BD DXE\n");
+ return -EINVAL;
+ }
+
+ memset(bd, 0, sizeof(*bd));
+
+ wcn36xx_dbg(WCN36XX_DBG_TX,
+ "tx skb %p len %d fc %04x sn %d %s %s\n",
+ skb, skb->len, __le16_to_cpu(hdr->frame_control),
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+ bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+ bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+ if (bd->tx_comp) {
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ if (wcn->tx_ack_skb) {
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+ wcn36xx_warn("tx_ack_skb already set\n");
+ return -EINVAL;
+ }
+
+ wcn->tx_ack_skb = skb;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ /* Only one at a time is supported by fw. Stop the TX queues
+ * until the ack status gets back.
+ *
+ * TODO: Add watchdog in case FW does not answer
+ */
+ ieee80211_stop_queues(wcn->hw);
+ }
+
+ /* Data frames are served first */
+ if (is_low) {
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+ } else {
+ /* MGMT and CTRL frames are handled here */
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
+ }
+
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ bd->tx_bd_sign = 0xbdbdbdbd;
+
+ return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 000000000000..bbfbcf808c77
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN 24
+#define WCN36XX_BMU_WQ_TX 25
+#define WCN36XX_TID 7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID 0xA
+#define WCN36XX_TX_U_WQ_ID 0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
+struct wcn36xx_pdu {
+ u32 dpu_fb:8;
+ u32 adu_fb:8;
+ u32 pdu_id:16;
+
+ /* 0x04*/
+ u32 tail_pdu_idx:16;
+ u32 head_pdu_idx:16;
+
+ /* 0x08*/
+ u32 pdu_count:7;
+ u32 mpdu_data_off:9;
+ u32 mpdu_header_off:8;
+ u32 mpdu_header_len:8;
+
+ /* 0x0c*/
+ u32 reserved4:8;
+ u32 tid:4;
+ u32 reserved3:4;
+ u32 mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 rx_key_id:3;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 uma_bypass:1;
+ u32 csr11:1;
+ u32 reserved0:1;
+ u32 scan_learn:1;
+ u32 rx_ch:4;
+ u32 rtsf:1;
+ u32 bsf:1;
+ u32 a2hf:1;
+ u32 st_auf:1;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 addr3:8;
+ u32 addr2:8;
+ u32 addr1:8;
+ u32 dpu_desc_idx:8;
+
+ /* 0x18*/
+ u32 rxp_flags:23;
+ u32 rate_id:9;
+
+ u32 phy_stat0;
+ u32 phy_stat1;
+
+ /* 0x24 */
+ u32 rx_times;
+
+ u32 pmi_cmd[6];
+
+ /* 0x40 */
+ u32 reserved7:4;
+ u32 reorder_slot_id:6;
+ u32 reorder_fwd_id:6;
+ u32 reserved6:12;
+ u32 reorder_code:4;
+
+ /* 0x44 */
+ u32 exp_seq_num:12;
+ u32 cur_seq_num:12;
+ u32 fr_type_subtype:8;
+
+ /* 0x48 */
+ u32 msdu_size:16;
+ u32 sub_fr_id:4;
+ u32 proc_order:4;
+ u32 reserved9:4;
+ u32 aef:1;
+ u32 lsf:1;
+ u32 esf:1;
+ u32 asf:1;
+};
+
+struct wcn36xx_tx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 fw_tx_comp:1;
+ u32 tx_comp:1;
+ u32 reserved1:1;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 reserved0:12;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 reserved5:7;
+ u32 queue_id:5;
+ u32 bd_rate:2;
+ u32 ack_policy:2;
+ u32 sta_index:8;
+ u32 dpu_desc_idx:8;
+
+ u32 tx_bd_sign;
+ u32 reserved6;
+ u32 dxe_start_time;
+ u32 dxe_end_time;
+
+ /*u32 tcp_udp_start_off:10;
+ u32 header_cks:16;
+ u32 reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 000000000000..58b63833e8e7
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+ WCN36XX_DBG_DXE = 0x00000001,
+ WCN36XX_DBG_DXE_DUMP = 0x00000002,
+ WCN36XX_DBG_SMD = 0x00000004,
+ WCN36XX_DBG_SMD_DUMP = 0x00000008,
+ WCN36XX_DBG_RX = 0x00000010,
+ WCN36XX_DBG_RX_DUMP = 0x00000020,
+ WCN36XX_DBG_TX = 0x00000040,
+ WCN36XX_DBG_TX_DUMP = 0x00000080,
+ WCN36XX_DBG_HAL = 0x00000100,
+ WCN36XX_DBG_HAL_DUMP = 0x00000200,
+ WCN36XX_DBG_MAC = 0x00000400,
+ WCN36XX_DBG_BEACON = 0x00000800,
+ WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+ WCN36XX_DBG_PMC = 0x00002000,
+ WCN36XX_DBG_PMC_DUMP = 0x00004000,
+ WCN36XX_DBG_ANY = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...) \
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+
+#define wcn36xx_warn(fmt, arg...) \
+ printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...) \
+ printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do { \
+	if (wcn36xx_dbg_mask & (mask))			\
+ printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
+	if (wcn36xx_dbg_mask & (mask))			\
+ print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
+ DUMP_PREFIX_OFFSET, 32, 1, \
+ buf, len, false); \
+} while (0)
+
+#define WCN36XX_HW_CHANNEL(__wcn) ((__wcn)->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) ((__wcn)->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) ((__wcn)->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) ((__wcn)->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) ((__wcn)->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) ((__wcn)->hw->conf.chandef.chan->max_power)
+
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+}
+
+struct nv_data {
+ int is_valid;
+ u8 table;
+};
+
+/* Interface for the platform control path
+ *
+ * @open: called when wcn36xx wants to open the control channel.
+ * @close: called to close the control channel.
+ * @tx: sends a buffer to the firmware over the control channel.
+ * @get_hw_mac: retrieves the hardware MAC address.
+ * @smsm_change_state: clears/sets bits in the shared memory state machine.
+ */
+struct wcn36xx_platform_ctrl_ops {
+ int (*open)(void *drv_priv, void *rsp_cb);
+ void (*close)(void);
+ int (*tx)(char *buf, size_t len);
+ int (*get_hw_mac)(u8 *addr);
+ int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: initially set to 0xFF; the real value is returned by the HW
+ *  after the first config_bss call and must be used in delete_bss and
+ *  enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+ struct list_head list;
+ struct wcn36xx_sta *sta;
+ u8 dtim_period;
+ enum ani_ed_type encrypt_type;
+ bool is_joining;
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Power management */
+ enum wcn36xx_power_state pw_state;
+
+ u8 bss_index;
+ u8 ucast_dpu_signature;
+ /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+ u8 self_sta_index;
+ u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ *  used in both SMD channel and TX BD. See the table below for when each
+ *  index is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * | | STA | AP |
+ * |______________|_____________|_______________|
+ * | TX BD |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta| sta_index | sta_index |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+ struct wcn36xx_vif *vif;
+ u16 aid;
+ u16 tid;
+ u8 sta_index;
+ u8 dpu_desc_index;
+ u8 bss_sta_index;
+ u8 bss_dpu_desc_index;
+ bool is_data_encrypted;
+ /* Rates */
+ struct wcn36xx_hal_supported_rates supported_rates;
+};
+struct wcn36xx_dxe_ch;
+struct wcn36xx {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ struct list_head vif_list;
+
+ u8 fw_revision;
+ u8 fw_version;
+ u8 fw_minor;
+ u8 fw_major;
+
+ /* extra byte for the NULL termination */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+ /* IRQs */
+ int tx_irq;
+ int rx_irq;
+ void __iomem *mmio;
+
+ struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ /*
+ * smd_buf must be protected with smd_mutex to garantee
+ * that all messages are sent one after another
+ */
+ u8 *hal_buf;
+ size_t hal_rsp_len;
+ struct mutex hal_mutex;
+ struct completion hal_rsp_compl;
+ struct workqueue_struct *hal_ind_wq;
+ struct work_struct hal_ind_work;
+ struct mutex hal_ind_mutex;
+ struct list_head hal_ind_queue;
+
+ /* DXE channels */
+ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
+ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
+ struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
+ struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
+
+ /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+ spinlock_t dxe_lock;
+ bool queues_stopped;
+
+ /* Memory pools */
+ struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+ struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+ struct sk_buff *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+ /* Debug file system entry */
+ struct wcn36xx_dfs_entry dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+ u8 major,
+ u8 minor,
+ u8 version,
+ u8 revision)
+{
+ return (wcn->fw_major == major &&
+ wcn->fw_minor == minor &&
+ wcn->fw_version == version &&
+ wcn->fw_revision == revision);
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif /* _WCN36XX_H_ */
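wcn36xx_dbg_mask is a module-wide bitmask that gates the wcn36xx_dbg() and wcn36xx_dbg_dump() macros per category from enum wcn36xx_debug_mask. A minimal usage sketch (the skb variable is assumed to hold a received frame):

	/* print only when the RX / RX_DUMP categories are enabled */
	wcn36xx_dbg(WCN36XX_DBG_RX, "rx frame, len %u\n", skb->len);
	wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "HEX RX: ", skb->data, skb->len);
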
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 61c302a6bdea..5b340769d5bb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
conn.channel = ch - 1;
- memcpy(conn.bssid, bss->bssid, 6);
- memcpy(conn.dst_mac, bss->bssid, 6);
+ memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+ memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
/*
* FW don't support scan after connection attempt
*/
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eb1dc7ad80fb..eeceab39cda2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, wil->csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index b827d51c30a3..0d950f209dae 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if (priv->wep_is_on)
frame_ctl |= IEEE80211_FCTL_PROTECTED;
if (priv->operating_mode == IW_MODE_ADHOC) {
- skb_copy_from_linear_data(skb, &header.addr1, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- memcpy(&header.addr3, priv->BSSID, 6);
+ skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
} else {
frame_ctl |= IEEE80211_FCTL_TODS;
- memcpy(&header.addr1, priv->CurrentBSSID, 6);
- memcpy(&header.addr2, dev->dev_addr, 6);
- skb_copy_from_linear_data(skb, &header.addr3, 6);
+ memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+ skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
}
if (priv->use_wpa)
- memcpy(&header.addr4, SNAP_RFC1024, 6);
+ memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
header.frame_control = cpu_to_le16(frame_ctl);
/* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
}
}
- memcpy(skbp, header->addr1, 6); /* destination address */
+ memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(&skbp[6], header->addr3, 6);
+ memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
else
- memcpy(&skbp[6], header->addr2, 6); /* source address */
+ memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
u8 frag_no, int more_frags)
{
- u8 mac4[6];
- u8 source[6];
+ u8 mac4[ETH_ALEN];
+ u8 source[ETH_ALEN];
struct sk_buff *skb;
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(source, header->addr3, 6);
+ memcpy(source, header->addr3, ETH_ALEN);
else
- memcpy(source, header->addr2, 6);
+ memcpy(source, header->addr2, ETH_ALEN);
rx_packet_loc += 24; /* skip header */
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
msdu_size -= 4;
if (frag_no == 0) { /* first fragment */
- atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
- msdu_size -= 6;
- rx_packet_loc += 6;
+ atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+ msdu_size -= ETH_ALEN;
+ rx_packet_loc += ETH_ALEN;
if (priv->do_rx_crc)
crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_seq = seq_no;
priv->frag_no = 1;
priv->frag_len = msdu_size;
- memcpy(priv->frag_source, source, 6);
- memcpy(&priv->rx_buf[6], source, 6);
- memcpy(priv->rx_buf, header->addr1, 6);
+ memcpy(priv->frag_source, source, ETH_ALEN);
+ memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+ memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
}
}
} else if (priv->frag_no == frag_no &&
priv->frag_seq == seq_no &&
- memcmp(priv->frag_source, source, 6) == 0) {
+ memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
more_frags = 1; /* don't send broken assembly */
}
}
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
priv->frag_no++;
if (!more_frags) { /* last one */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
priv->dev->stats.rx_dropped++;
} else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
/* we use the same buffer for frag reassembly and control packets */
- memset(priv->frag_source, 0xff, 6);
+ memset(priv->frag_source, 0xff, ETH_ALEN);
if (priv->do_rx_crc) {
/* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->last_qual = jiffies;
priv->last_beacon_timestamp = 0;
memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
- memset(priv->BSSID, 0, 6);
+ memset(priv->BSSID, 0, ETH_ALEN);
priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
priv->station_was_associated = 0;
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
- memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+ memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
for (i = 0; i < priv->BSS_list_entries; i++) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+ memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
static void atmel_scan(struct atmel_private *priv, int specific_ssid)
{
struct {
- u8 BSSID[6];
+ u8 BSSID[ETH_ALEN];
u8 SSID[MAX_SSID_LENGTH];
u8 scan_type;
u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
u8 SSID_size;
} cmd;
- memset(cmd.BSSID, 0xff, 6);
+ memset(cmd.BSSID, 0xff, ETH_ALEN);
if (priv->fast_scan) {
cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+ memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
cmd.channel = (priv->channel & 0x7f);
cmd.BSS_type = type;
cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->BSSID, 6);
+ memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
cmd.BSS_type = type;
cmd.channel = (priv->channel & 0x7f);
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
/* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
struct ass_req_format {
__le16 capability;
__le16 listen_interval;
- u8 ap[6]; /* nothing after here directly accessible */
+ u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
u8 ssid_el_id;
u8 ssid_len;
u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, 6);
- memcpy(header.addr2, priv->dev->dev_addr, 6);
- memcpy(header.addr3, priv->CurrentBSSID, 6);
+ memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+ memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+ memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
- memcpy(body.ap, priv->CurrentBSSID, 6);
+ memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
int i, index;
for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
- if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+ if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
index = i;
/* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
return;
index = priv->BSS_list_entries++;
- memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+ memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
priv->BSSinfo[index].RSSI = rssi;
} else {
if (rssi > priv->BSSinfo[index].RSSI)
@@ -3212,7 +3212,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
status != WLAN_STATUS_ASSOC_DENIED_RATES &&
status != WLAN_STATUS_CAPS_UNSUPPORTED &&
- priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+ priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->ReAssociationRequestRetryCnt++;
send_association_request(priv, 1);
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
{
struct bss_info *bss = &priv->BSSinfo[bss_index];
- memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+ memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
/* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
0x00, 0x04, 0x25, 0x00, 0x00, 0x00
};
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
- memcpy(dev->dev_addr, default_mac, 6);
+ memcpy(dev->dev_addr, default_mac, ETH_ALEN);
}
}
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
struct { /* NB this is matched to the hardware, don't change. */
u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
- u8 receiver_address[6];
+ u8 receiver_address[ETH_ALEN];
u8 wep_is_on;
u8 default_key; /* 0..3 */
u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
mib.wep_is_on = priv->wep_is_on;
mib.exclude_unencrypted = priv->exclude_unencrypted;
- memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+ memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
/* zero all the keys before adding in valid ones. */
memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 7c970d3ae358..05ee7f10cc8f 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
}
en_addr = en_addrs[override][i];
- val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+ if (e)
+ val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
if (off) {
b43_phy_mask(dev, en_addr, ~en_mask);
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a89083..4ae63f4ddfb2 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
else
txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb == rate) ||
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 849a28c80302..86588c9ff0f2 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
txhdr->mac_frame_ctl = wlhdr->frame_control;
- memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+ memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
/* Calculate duration for fallback rate */
if ((rate_fb->hw_value == rate) ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e13b1a65c65f..3e10b801eee8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -26,7 +26,6 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <defs.h>
@@ -239,7 +238,9 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num = SDIO_FUNC_1;
reg_size = 4;
- brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+ if (ret)
+ goto done;
}
do {
@@ -255,6 +256,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
func_num, addr, data, 4);
} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+done:
if (ret != 0)
brcmf_err("failed with %d\n", ret);
@@ -315,8 +317,36 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
*ret = retval;
}
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr, struct sk_buff *pkt)
+{
+ unsigned int req_sz;
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* Single skb use the standard mmc interface */
+ req_sz = pkt->len + 3;
+ req_sz &= (uint)~3;
+
+ if (write)
+ return sdio_memcpy_toio(sdiodev->func[fn], addr,
+ ((u8 *)(pkt->data)),
+ req_sz);
+ else if (fn == 1)
+ return sdio_memcpy_fromio(sdiodev->func[fn],
+ ((u8 *)(pkt->data)),
+ addr, req_sz);
+ else
+ /* function 2 read is FIFO operation */
+ return sdio_readsb(sdiodev->func[fn],
+ ((u8 *)(pkt->data)), addr,
+ req_sz);
+}
+
/**
- * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * brcmf_sdio_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
@@ -327,12 +357,13 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr, struct sk_buff_head *pktlist)
+static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+ bool write, u32 addr,
+ struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_sz, seg_sz;
+ unsigned int max_req_sz, orig_offset, dst_offset;
+ unsigned short max_seg_cnt, seg_sz;
unsigned char *pkt_data, *orig_data, *dst_data;
struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_data mmc_dat;
struct sg_table st;
struct scatterlist *sgl;
- struct mmc_host *host;
int ret = 0;
if (!pktlist->qlen)
@@ -351,27 +381,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Single skb use the standard mmc interface */
- if (pktlist->qlen == 1) {
- pkt_next = pktlist->next;
- req_sz = pkt_next->len + 3;
- req_sz &= (uint)~3;
-
- if (write)
- return sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt_next->data)),
- req_sz);
- else if (fn == 1)
- return sdio_memcpy_fromio(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)),
- addr, req_sz);
- else
- /* function 2 read is FIFO operation */
- return sdio_readsb(sdiodev->func[fn],
- ((u8 *)(pkt_next->data)), addr,
- req_sz);
- }
-
target_list = pktlist;
/* for host with broken sg support, prepare a page aligned list */
__skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
- host = sdiodev->func[fn]->card->host;
func_blk_sz = sdiodev->func[fn]->cur_blksize;
- /* Blocks per command is limited by host count, host transfer
- * size and the maximum for IO_RW_EXTENDED of 511 blocks.
- */
- max_blks = min_t(unsigned int, host->max_blk_count, 511u);
- max_req_sz = min_t(unsigned int, host->max_req_size,
- max_blks * func_blk_sz);
- max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
- max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+ max_req_sz = sdiodev->max_request_size;
+ max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+ target_list->qlen);
seg_sz = target_list->qlen;
pkt_offset = 0;
pkt_next = target_list->next;
- if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+ if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
ret = -ENOMEM;
goto exit;
}
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+ mmc_dat.sg = st.sgl;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+ mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
+ mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1<<27; /* block mode */
+ /* for function 1 the addr will be incremented */
+ mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
- memset(&mmc_req, 0, sizeof(struct mmc_request));
- memset(&mmc_cmd, 0, sizeof(struct mmc_command));
- memset(&mmc_dat, 0, sizeof(struct mmc_data));
sgl = st.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
- if (sg_data_sz > host->max_seg_size)
- sg_data_sz = host->max_seg_size;
+ if (sg_data_sz > sdiodev->max_segment_size)
+ sg_data_sz = sdiodev->max_segment_size;
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
@@ -444,7 +461,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_next = pkt_next->next;
}
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
break;
}
seg_sz -= sg_cnt;
@@ -455,27 +472,17 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
ret = -ENOTBLK;
goto exit;
}
- mmc_dat.sg = st.sgl;
+
mmc_dat.sg_len = sg_cnt;
- mmc_dat.blksz = func_blk_sz;
mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
- mmc_cmd.opcode = SD_IO_RW_EXTENDED;
- mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
- mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
- mmc_cmd.arg |= 1<<27; /* block mode */
- /* incrementing addr for function 1 */
- mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
- mmc_req.cmd = &mmc_cmd;
- mmc_req.data = &mmc_dat;
+ /* incrementing addr for function 1 */
if (fn == 1)
addr += req_sz;
mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
- mmc_wait_for_req(host, &mmc_req);
+ mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret != 0) {
@@ -546,7 +553,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
{
uint width;
int err = 0;
- struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
@@ -556,19 +562,17 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- skb_queue_head_init(&pkt_list);
- skb_queue_tail(&pkt_list, pkt);
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
done:
return err;
}
int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq)
+ uint flags, struct sk_buff_head *pktq, uint totlen)
{
- uint incr_fix;
+ struct sk_buff *glom_skb;
+ struct sk_buff *skb;
uint width;
int err = 0;
@@ -580,8 +584,22 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
- incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
- err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
+ if (pktq->qlen == 1)
+ err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+ else if (!sdiodev->sg_support) {
+ glom_skb = brcmu_pkt_buf_get_skb(totlen);
+ if (!glom_skb)
+ return -ENOMEM;
+		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+		if (err) {
+			brcmu_pkt_buf_free_skb(glom_skb);
+			goto done;
+		}
+
+		skb_queue_walk(pktq, skb) {
+			memcpy(skb->data, glom_skb->data, skb->len);
+			skb_pull(glom_skb, skb->len);
+		}
+		brcmu_pkt_buf_free_skb(glom_skb);
+ } else
+ err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
done:
return err;
@@ -592,7 +610,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- struct sk_buff_head pktq;
+ uint width;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
- __skb_queue_head_init(&pktq);
- __skb_queue_tail(&pktq, mypkt);
- err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
- __skb_dequeue_tail(&pktq);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+
+ if (!err)
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@@ -617,16 +637,26 @@ int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff_head *pktq)
{
+ struct sk_buff *skb;
uint width;
- int err = 0;
+ int err;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- brcmf_sdio_addrprep(sdiodev, width, &addr);
+ err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+ if (err)
+ return err;
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
+	if (pktq->qlen == 1 || !sdiodev->sg_support) {
+		skb_queue_walk(pktq, skb) {
+			err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+			if (err)
+				break;
+		}
+	} else {
+		err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
+	}
return err;
}
@@ -639,7 +669,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
- struct sk_buff_head pkt_list;
dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return -EIO;
}
pkt->priority = 0;
- skb_queue_head_init(&pkt_list);
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
- skb_queue_tail(&pkt_list, pkt);
bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ sdaddr, pkt);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;
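For reference, the mmc_cmd.arg assembled in brcmf_sdio_sglist_rw() above is the standard SDIO CMD53 (IO_RW_EXTENDED) argument; the sketch below only restates that packing in one place (nblocks stands in for mmc_dat.blocks):

	/* CMD53 argument layout as packed above:
	 *   bit 31     R/W flag (1 = write)
	 *   bits 30-28 SDIO function number
	 *   bit 27     block mode
	 *   bit 26     OP code, 1 = incrementing address (used for function 1)
	 *   bits 25-9  register address, 17 bits (addr & 0x1FFFF)
	 *   bits 8-0   block count in block mode (blocks & 0x1FF)
	 */
	u32 arg = (write ? 1u << 31 : 0) |
		  ((fn & 0x7) << 28) |
		  (1u << 27) |
		  ((fn == 1) ? 1u << 26 : 0) |
		  ((addr & 0x1FFFF) << 9) |
		  (nblocks & 0x1FF);
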
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3462b75bd08..905704e335d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -21,6 +21,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/sched.h> /* request_irq() */
@@ -34,6 +35,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
@@ -41,13 +43,6 @@
#define DMA_ALIGN_MASK 0x03
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
-#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
-
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -58,7 +53,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -320,6 +316,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ struct mmc_host *host;
+ uint max_blocks;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -366,6 +364,20 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_err("F2 error, probe failed %d...\n", err);
goto fail;
}
+
+ /*
+ * determine host related variables after brcmf_sdio_probe()
+ * as func->cur_blksize is properly set and F2 init has been
+ * completed successfully.
+ */
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
brcmf_dbg(SDIO, "F2 init completed...\n");
return 0;
@@ -466,7 +478,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
- brcmfmac_sdio_pdata = pdev->dev.platform_data;
+ brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
if (brcmfmac_sdio_pdata->power_on)
brcmfmac_sdio_pdata->power_on();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 2eb9e642c9bf..899a2ada5b82 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -97,8 +97,6 @@
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_NULL 0xf
-#define BRCMF_EVENTING_MASK_LEN 16
-
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
@@ -632,29 +630,29 @@ struct brcmf_skb_reorder_data {
u8 *reorder;
};
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
/* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
- struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+ struct sk_buff *rxp);
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
- s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+ bool success);
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 74156f84180c..a6eb09e5d46f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -132,35 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
* interface functions from common layer
*/
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
- struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+ int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
/* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
- bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
#ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
-extern void brcmf_sdio_register(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
#endif
#ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_register(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
#endif
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 40e7f854e10f..64e9cff241b9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -509,9 +509,8 @@ netif_rx:
}
}
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
- struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
- skb_queue_len(skb_list));
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- skb_queue_walk_safe(skb_list, skb, pnext) {
- skb_unlink(skb, skb_list);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
- ifp = drvr->iflist[ifidx];
-
- if (ret || !ifp || !ifp->ndev) {
- if ((ret != -ENODATA) && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return;
}
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+ else
+ brcmf_netif_rx(ifp, skb);
}
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index ef9179883748..53c6e710f2cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -22,21 +22,21 @@
*/
/* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
/* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
- struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+ struct sk_buff *txp);
/* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1aa75d5951b8..b02953c4ade7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -275,11 +275,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
struct work_struct datawork;
atomic_t dpc_tskcnt;
- const struct firmware *firmware;
- u32 fw_ptr;
-
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+ u32 chipid;
+ u32 revmsk;
+ const char *bin;
+ const char *nv;
+};
+
+enum brcmf_firmware_type {
+ BRCMF_FIRMWARE_BIN,
+ BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+ name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+ { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+ { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+ { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+ { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+ enum brcmf_firmware_type type)
+{
+ const struct firmware *fw;
+ const char *name;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+ if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+ name = brcmf_fwname_data[i].bin;
+ break;
+ case BRCMF_FIRMWARE_NVRAM:
+ name = brcmf_fwname_data[i].nv;
+ break;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+ goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+ bus->ci->chip, bus->ci->chiprev);
+ return NULL;
+
+found:
+ err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+ if ((err) || (!fw)) {
+ brcmf_err("fail to request firmware %s (%d)\n", name, err);
+ return NULL;
+ }
+
+ return fw;
+}
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
@@ -1061,6 +1147,8 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
+ trace_brcmf_sdpcm_hdr(false, header);
+
/* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1183,6 +1271,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
SDPCM_DOFFSET_MASK;
*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
*(((__le32 *)header) + 2) = 0;
+ trace_brcmf_sdpcm_hdr(true, header);
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1303,7 +1392,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
sdio_claim_host(bus->sdiodev->func[1]);
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, &bus->glom);
+ SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@@ -1406,13 +1495,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
+ skb_unlink(pfirst, &bus->glom);
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ bus->sdcnt.rxglompkts++;
}
- /* sent any remaining packets up */
- if (bus->glom.qlen)
- brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
- bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1557,7 +1645,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
- struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
@@ -1759,9 +1846,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
}
- skb_queue_head_init(&pktlist);
- skb_queue_tail(&pktlist, pkt);
- brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+ brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -1786,10 +1871,65 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/*
+ * struct brcmf_skbuff_cb reserves the first two bytes in sk_buff::cb for
+ * bus layer usage.
+ */
/* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG 0x10000
+#define ALIGN_SKB_FLAG 0x8000
/* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+ struct sk_buff_head *pktq,
+ struct sk_buff *pkt, uint chan)
+{
+ struct sk_buff *pkt_pad;
+ u16 tail_pad, tail_chop, sg_align;
+ unsigned int blksize;
+ int ntail;
+
+ blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ sg_align = 4;
+ if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+ sg_align = sdiodev->pdata->sd_sgentry_align;
+ /* sg entry alignment should be a divisor of block size */
+ WARN_ON(blksize % sg_align);
+
+ /* Check tail padding */
+ pkt_pad = NULL;
+ tail_chop = pkt->len % sg_align;
+ tail_pad = sg_align - tail_chop;
+ tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+ if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ if (pkt_pad == NULL)
+ return -ENOMEM;
+ memcpy(pkt_pad->data,
+ pkt->data + pkt->len - tail_chop,
+ tail_chop);
+ *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ skb_trim(pkt, pkt->len - tail_chop);
+ __skb_queue_after(pktq, pkt, pkt_pad);
+ } else {
+ ntail = pkt->data_len + tail_pad -
+ (pkt->end - pkt->tail);
+ if (skb_cloned(pkt) || ntail > 0)
+ if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+ return -ENOMEM;
+ if (skb_linearize(pkt))
+ return -ENOMEM;
+ __skb_put(pkt, tail_pad);
+ }
+
+ if (pkt_pad)
+ return pkt->len + tail_chop;
+ else
+ return pkt->len - tail_pad;
+}
+
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
@@ -1806,24 +1946,16 @@ static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
- u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
- int ntail;
- struct sk_buff *pkt_next, *pkt_new;
+ u16 head_pad, head_align;
+ struct sk_buff *pkt_next;
u8 *dat_buf;
- unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ int err;
struct brcmf_sdio_hdrinfo hd_info = {0};
/* SDIO ADMA requires at least 32 bit alignment */
head_align = 4;
- sg_align = 4;
- if (bus->sdiodev->pdata) {
- head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
- bus->sdiodev->pdata->sd_head_align : 4;
- sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
- bus->sdiodev->pdata->sd_sgentry_align : 4;
- }
- /* sg entry alignment should be a divisor of block size */
- WARN_ON(blksize % sg_align);
+ if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+ head_align = bus->sdiodev->pdata->sd_head_align;
pkt_next = pktq->next;
dat_buf = (u8 *)(pkt_next->data);
@@ -1842,40 +1974,20 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
}
- /* Check tail padding */
- pkt_new = NULL;
- tail_chop = pkt_next->len % sg_align;
- tail_pad = sg_align - tail_chop;
- tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
- if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
- pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
- if (pkt_new == NULL)
- return -ENOMEM;
- memcpy(pkt_new->data,
- pkt_next->data + pkt_next->len - tail_chop,
- tail_chop);
- *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
- skb_trim(pkt_next, pkt_next->len - tail_chop);
- __skb_queue_after(pktq, pkt_next, pkt_new);
+ if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+ err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+ pkt_next, chan);
+ if (err < 0)
+ return err;
+ hd_info.len = (u16)err;
} else {
- ntail = pkt_next->data_len + tail_pad -
- (pkt_next->end - pkt_next->tail);
- if (skb_cloned(pkt_next) || ntail > 0)
- if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
- return -ENOMEM;
- if (skb_linearize(pkt_next))
- return -ENOMEM;
- dat_buf = (u8 *)(pkt_next->data);
- __skb_put(pkt_next, tail_pad);
+ hd_info.len = pkt_next->len;
}
- /* Now prep the header */
- if (pkt_new)
- hd_info.len = pkt_next->len + tail_chop;
- else
- hd_info.len = pkt_next->len - tail_pad;
hd_info.channel = chan;
hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+ /* Now fill the header */
brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
if (BRCMF_BYTES_ON() &&
@@ -1908,8 +2020,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
- if (dummy_flags & DUMMY_SKB_FLAG) {
- chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3149,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
return true;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
- if (bus->firmware->size < bus->fw_ptr + len)
- len = bus->firmware->size - bus->fw_ptr;
-
- memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
- bus->fw_ptr += len;
- return len;
-}
-
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
+ const struct firmware *fw;
+ int err;
int offset;
- uint len;
- u8 *memblock = NULL, *memptr;
- int ret;
- u8 idx;
-
- brcmf_dbg(INFO, "Enter\n");
-
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request firmware %d\n", ret);
- return ret;
- }
- bus->fw_ptr = 0;
-
- memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
- memptr += (BRCMF_SDALIGN -
- ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
- offset = bus->ci->rambase;
-
- /* Download image */
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != idx)
- memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
- while (len) {
- ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
- if (ret) {
+ int address;
+ int len;
+
+ fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+ if (fw == NULL)
+ return -ENOENT;
+
+ if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+ BRCMF_MAX_CORENUM)
+ memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+
+ err = 0;
+ offset = 0;
+ address = bus->ci->rambase;
+ while (offset < fw->size) {
+ len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+ fw->size - offset;
+ err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+ (u8 *)&fw->data[offset], len);
+ if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- ret, MEMBLOCK, offset);
- goto err;
+ err, len, address);
+ goto failure;
}
-
- offset += MEMBLOCK;
- len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+ offset += len;
+ address += len;
}
-err:
- kfree(memblock);
-
- release_firmware(bus->firmware);
- bus->fw_ptr = 0;
+failure:
+ release_firmware(fw);
- return ret;
+ return err;
}
/*
@@ -3111,7 +3197,8 @@ err:
* by two NULs.
*/
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+ const struct firmware *nv)
{
char *varbuf;
char *dp;
@@ -3120,12 +3207,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
int ret = 0;
uint buf_len, n, len;
- len = bus->firmware->size;
+ len = nv->size;
varbuf = vmalloc(len);
if (!varbuf)
return -ENOMEM;
- memcpy(varbuf, bus->firmware->data, len);
+ memcpy(varbuf, nv->data, len);
dp = varbuf;
findNewline = false;
@@ -3177,18 +3264,16 @@ err:
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
+ const struct firmware *nv;
int ret;
- ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
- &bus->sdiodev->func[2]->dev);
- if (ret) {
- brcmf_err("Fail to request nvram %d\n", ret);
- return ret;
- }
+ nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+ if (nv == NULL)
+ return -ENOENT;
- ret = brcmf_process_nvram_vars(bus);
+ ret = brcmf_process_nvram_vars(bus, nv);
- release_firmware(bus->firmware);
+ release_firmware(nv);
return ret;
}
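In brcmf_sdbrcm_get_fw() above, revmsk is a bitmask over chip revisions matched with BIT(bus->ci->chiprev), so a single chip ID can select different images per silicon revision. A worked example from the table as added:

	/* BCM43241, chiprev 2: 0x0000001F & BIT(2) = 0x04 -> BCM43241B0 images
	 * BCM43241, chiprev 5: 0xFFFFFFE0 & BIT(5) = 0x20 -> BCM43241B4 images
	 */
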
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index e679214b3c98..14bc24dc5bae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -102,7 +102,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
};
#undef BRCMF_ENUM_DEF
+#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
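With BRCMF_EVENTING_MASK_LEN now derived from the event list, the mask grows automatically as event codes are added. Assuming BRCMF_E_LAST ends up one past PSTA_PRIMARY_INTF_IND (i.e. 129), DIV_ROUND_UP(129, 8) = 17 bytes, one more than the hard-coded 16 this patch removes from dhd.h.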
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 82f9140f3d35..d0cd0bf95c5a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
+ * @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
+ u16 bus_flags;
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;
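
The new bus_flags member lives in the same per-packet control buffer as the existing fields, and the kerneldoc above notes that skb->cb only provides 48 bytes of storage. A sketch, not part of the patch, of how that assumption could be checked at build time:

#include <linux/bug.h>
#include <linux/skbuff.h>

/* Fails the build if brcmf_skbuff_cb ever outgrows skb->cb (48 bytes). */
static inline void brcmf_skbcb_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct brcmf_skbuff_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
}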
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index ca72177388b9..2096a14ef1fb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return false;
regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* if core is already in reset, just return */
regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/*
* Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ if (idx == BRCMF_MAX_CORENUM)
+ return;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ ci->chiprev >= 2)
+ ci->chip = BCM4339_CHIP_ID;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
+ case BCM4339_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2e084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18005000;
+ ci->c_inf[1].wrapbase = 0x18105000;
+ ci->c_inf[1].cib = 0x15004211;
+ ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+ ci->c_inf[2].base = 0x18002000;
+ ci->c_inf[2].wrapbase = 0x18102000;
+ ci->c_inf[2].cib = 0x04084411;
+ ci->ramsize = 0xc0000;
+ ci->rambase = 0x180000;
+ break;
default:
brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 83c041f1bf4a..507c61c991fa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,6 +54,14 @@
#define BRCMF_MAX_CORENUM 6
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+
struct chip_core_info {
u16 id;
u16 rev;
@@ -215,17 +223,16 @@ struct sdpcmd_regs {
u16 PAD[0x80];
};
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci,
- u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, char *nvram_dat,
- uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, char *nvram_dat,
+ uint nvram_sz);
#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 2b5407f002e5..fc0d4f0129db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -178,21 +178,25 @@ struct brcmf_sdio_dev {
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
bool irq_wake; /* irq wake enable flags */
+ bool sg_support;
+ uint max_request_size;
+ ushort max_segment_count;
+ uint max_segment_size;
};
/* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+ int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
@@ -206,22 +210,17 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* Returns 0 or error code.
* NOTE: Async operation is not currently supported.
*/
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq, uint totlen);
/* Flags bits */
@@ -237,46 +236,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
* nbytes: number of bytes to transfer to/from buf
* Returns 0 or error code.
*/
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
- u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
- u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+ u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+ u8 *data, uint size);
/* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
/* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
/* attach, return handler on success, NULL if failed.
* The handler shall be provided by all subsequent calls. No local cache
* cfghdl points to the starting address of pci device mapped memory
*/
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
/* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
- uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u8 *byte);
/* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
- uint rw, uint fnc, uint addr,
- u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+ uint addr, u32 *word, uint nbyte);
/* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
- bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
- wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+ wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index bc2917112899..3c67529b9074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -78,13 +78,15 @@ TRACE_EVENT(brcmf_hexdump,
TP_ARGS(data, len),
TP_STRUCT__entry(
__field(unsigned long, len)
+ __field(unsigned long, addr)
__dynamic_array(u8, hdata, len)
),
TP_fast_assign(
__entry->len = len;
+ __entry->addr = (unsigned long)data;
memcpy(__get_dynamic_array(hdata), data, len);
),
- TP_printk("hexdump [length=%lu]", __entry->len)
+ TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
);
TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@ TRACE_EVENT(brcmf_bdchdr,
TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
+TRACE_EVENT(brcmf_sdpcm_hdr,
+ TP_PROTO(bool tx, void *data),
+ TP_ARGS(tx, data),
+ TP_STRUCT__entry(
+ __field(u8, tx)
+ __field(u16, len)
+ __array(u8, hdr, 12)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->hdr, data, 12);
+ __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+ __entry->tx = tx ? 1 : 0;
+ ),
+ TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+ __entry->len, __entry->hdr[4])
+);
+
#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH
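
The brcmf_sdpcm_hdr trace event added above copies the first 12 header bytes and reconstructs the 16-bit frame length from the two leading bytes in little-endian order, with the sequence number taken from byte 4 (per the TP_printk format). A tiny illustration of that decode, using a hypothetical helper name:

#include <linux/types.h>

static u16 sdpcm_frame_len(const u8 *hdr)
{
	/* e.g. hdr[0] = 0x34, hdr[1] = 0x12  ->  0x1234 (4660 bytes) */
	return hdr[0] | (hdr[1] << 8);
}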
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index f4aea47e0730..422f44c63175 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- skb_queue_head_init(&skbq);
- skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- brcmf_rx_frames(devinfo->dev, &skbq);
+ brcmf_rx_frame(devinfo->dev, skb);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index a8a267b5b87a..2d08c155c23b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -172,19 +172,19 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 73d01e586109..03bdcf29bd50 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
u16 dma_len;
};
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
- struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
- struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+ struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+ struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
#endif /* _BRCM_AMPDU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
index 97ea3881a8ec..a3d487ab1964 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.h
@@ -17,13 +17,11 @@
#ifndef _BRCM_ANTSEL_H_
#define _BRCM_ANTSEL_H_
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
- bool sel,
- u8 id, u8 fbid, u8 *antcfg,
- u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+ u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
#endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 006483a0abe6..39dd3a5b2979 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -32,20 +32,16 @@
#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
- u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
- u16 chanspec,
- u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+ u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 4090032e81a2..198053dfc310 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -88,26 +88,26 @@ struct brcms_info {
};
/* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
- bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
/* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 4608e0eb1493..8138f1cff4e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
if (!is_zero_ether_addr(sprom->il0mac)) {
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
return;
}
if (wlc_hw->_nbands > 1)
- memcpy(etheraddr, sprom->et1mac, 6);
+ memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
else
- memcpy(etheraddr, sprom->il0mac, 6);
+ memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
}
/* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
- if (device == BCM4313_D11N2G_ID)
+ if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index b5d7a38b53fe..c4d135cff04a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
struct brcms_bss_info *current_bss;
};
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
- struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
- uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
- uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
- u32 rspec,
- bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
- u32 rts_rate,
- u32 frame_rate,
- u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len,
- bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
- struct ieee80211_sta *sta,
- void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
- u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
- u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
- u16 chanspec,
- bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
- u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
- u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
- u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
- int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
- uint offset, const void *buf, int len,
- u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
- void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
- u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+ bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ u32 rts_rate, u32 frame_rate,
+ u8 rts_preamble_type, u8 frame_preamble_type,
+ uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+ struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+ bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+ int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+ int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index e34a71e7d242..4d3734f48d9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -179,121 +179,106 @@ struct shared_phy_params {
};
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct bcma_device *d11core,
- int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
- u16 *phyrev, u16 *radioid,
- u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
- u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
- u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
- struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
- bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
- struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
- uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
- u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
- uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
- uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *,
- u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
- bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
- bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
- bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
- u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
- u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
- struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
- u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
- bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
- u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+ struct bcma_device *d11core, int bandtype,
+ struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+ u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+ bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+ struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+ u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+ u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+ s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+ u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+ struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+ u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
#endif /* _BRCM_PHY_HAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 1dc767c31653..4960f7d26804 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
u8 do_init_g;
};
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
- u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi,
- u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *ptbl_info,
- u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
- uint tbl_offset, u16 tblAddr, u16 tblDataHi,
- u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
- struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
- const struct radio_regs *radioregs,
- u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
- s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
- u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
- u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
- u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
- u8 rate_mcs_end,
- u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
- u8 rate_ofdm_start,
- u8 rate_ofdm_end,
- u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+ u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+ struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+ const struct radio_regs *radioregs,
+ u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+ bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+ u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+ u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+ u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
#define LCNPHY_TX_POWER_TABLE_SIZE 128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
- const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
- struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
- u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
- s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+ u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
#define NPHY_MAX_HPVGA1_INDEX 10
#define NPHY_DEF_HPVGA1_INDEXLIMIT 7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
u32 q_pwr;
};
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
- bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_phy_write_table_nphy(pi, pti) \
wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_nphy_table_data_write(pi, w, v) \
wlc_phy_table_data_write((pi), (w), (v))
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
- u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
- u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+ void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+ const void *);
#define PHY_IPA(pi) \
((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
- u16 num_samps, u8 wait_time,
- u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
- struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
- u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
- struct nphy_txgains target_gain,
- u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
- s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
- s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
- s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
- u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
- u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
- struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+ u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+ struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+ struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+ u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+ s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+ s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+ bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+ u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+ u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
#define NPHY_TESTPATTERN_BPHY_EVM 0
#define NPHY_TESTPATTERN_BPHY_RFCS 1
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
- u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
#endif /* _BRCM_PHY_INT_H_ */
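The hunk above, and the similar header hunks throughout this patch, only drop the redundant extern storage-class specifier from function prototypes. File-scope function declarations have external linkage by default in C, so the two forms are equivalent and the generated code is unchanged. A minimal illustration (foo_read_temp and struct foo are hypothetical, used only to show the pattern):

extern int foo_read_temp(struct foo *dev);	/* old style: explicit but redundant */
int foo_read_temp(struct foo *dev);		/* new style: identical meaning, less noise */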
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 2c5b66b75970..dd8774717ade 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -124,56 +124,49 @@
struct brcms_phy;
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
- struct brcms_info *wl,
- struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ struct brcms_info *wl,
+ struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn) (struct brcms_phy *pi),
- void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
- u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
- u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
- u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
- u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
- physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
- int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
- u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
- void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
- const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
- u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn)(struct brcms_phy *pi),
+ void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+ int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+ int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+ int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+ int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 20e2012d5a3a..a014bbc4f935 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -20,7 +20,7 @@
#include "types.h"
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index d36ea5e1cc49..4da38cb4f318 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
};
/* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
- bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
- struct sk_buff *sdu,
- struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
- int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
- int match_reg_offset,
- const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
- const struct ieee80211_tx_queue_params *arg,
- bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
- struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
- u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
- const char *name, struct brcms_info *hdl,
- int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
- struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+ struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+ const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+ const struct ieee80211_tx_queue_params *arg,
+ bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+ u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl,
+ int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+ struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
- struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
- u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
- const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
- struct sk_buff *beacon, u16 tim_offset,
- u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
- struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
- size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+ u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+ u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+ struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index 980d578825cc..5bb88b78ed64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
/* sanitize, and sort a rateset with the basic bit(s) preserved, validate
* rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
- const struct brcms_c_rateset *hw_rs,
- bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+ const struct brcms_c_rateset *hw_rs,
+ bool check_brate, u8 txstreams);
/* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst);
/* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
- struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
- bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
- const struct brcms_c_rateset *rs_hw, uint phy_type,
- int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
- u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
- u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+ struct brcms_c_rateset *dst, bool basic_only,
+ u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+ const struct brcms_c_rateset *rs_hw, uint phy_type,
+ int bandtype, bool cck_only, uint rate_mask,
+ bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
index 19f6580f69be..ba9493009a33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.h
@@ -19,24 +19,19 @@
#include "types.h"
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
- u16 *ss_algo_channel,
- u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
- struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
- bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
- u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
- u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+ u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
#endif /* _BRCM_STF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
index 18750a814b4f..c87dd89bcb78 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
@@ -43,16 +43,14 @@ struct brcms_ucode {
u32 *bcm43xx_bomminor;
};
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
- unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
- unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+ unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int brcms_check_firmwares(struct brcms_info *wl);
#endif /* _BRCM_UCODE_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index c1fe245bb07e..84113ea16f84 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,5 +41,6 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
+#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 92623f02b1c0..8660a2cba098 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
void (*decchspec)(struct brcmu_chan *ch);
};
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
#endif /* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 898cacb8d01d..8ba445b3fd72 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
return skb_peek_tail(&pq->q[prec].skblist);
}
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
- bool (*match_fn)(struct sk_buff *p,
- void *arg),
- void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+ bool (*match_fn)(struct sk_buff *p,
+ void *arg),
+ void *arg);
/* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
/* Empty the queue at particular precedence level */
/* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
- bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* operations on a set of precedences in packet queue */
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
/* operations on packet queue as a whole */
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
return pq->len == 0;
}
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
- bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
/* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
/* externs */
/* format/print */
#ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* DEBUG */
#ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
#else
__printf(3, 4)
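The brcmu_utils.h hunk above also keeps the __printf(3, 4) annotation on brcmu_dbg_hex_dump() while dropping extern. __printf(a, b) is the kernel's wrapper for GCC's format attribute, which has the compiler type-check the format string (argument 3 here) against the variadic arguments (starting at argument 4). Roughly, as defined in the kernel's compiler headers (simplified):

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(3, 4)
void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);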
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 755a0c8edfe1..40078f5f932e 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -365,7 +365,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
static int cw1200_spi_probe(struct spi_device *func)
{
const struct cw1200_platform_data_spi *plat_data =
- func->dev.platform_data;
+ dev_get_platdata(&func->dev);
struct hwbus_priv *self;
int status;
@@ -443,7 +443,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
kfree(self);
}
- cw1200_spi_off(func->dev.platform_data);
+ cw1200_spi_off(dev_get_platdata(&func->dev));
return 0;
}
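The cw1200 hunks swap direct platform_data field access for the generic accessor. dev_get_platdata() is a thin inline over dev->platform_data, so behaviour is unchanged; the call sites simply stop reaching into struct device directly. Roughly (simplified from include/linux/device.h):

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}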
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 970a48baaf80..de7c4ffec309 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
}
}
- memcpy(req.bssid, selected->bssid, 6);
+ memcpy(req.bssid, selected->bssid, ETH_ALEN);
req.channel = selected->chid;
spin_unlock_irqrestore(&local->lock, flags);
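Replacing the literal 6 with ETH_ALEN does not change the copy length, since <linux/if_ether.h> defines ETH_ALEN as 6 (the number of octets in an Ethernet address); it only makes the intent explicit. The ipw2200 hunk below makes the same substitution for the EEPROM MAC copy.

/* from <linux/if_ether.h> */
#define ETH_ALEN	6		/* Octets in one ethernet addr */

memcpy(req.bssid, selected->bssid, ETH_ALEN);	/* still a 6-byte copy, now self-documenting */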
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6b823a1ab789..81903e33d5b1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
/* data's copy of the eeprom data */
static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
{
- memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+ memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
}
static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
out_free_libipw:
free_libipw(priv->net_dev, 0);
out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
iounmap(priv->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
/* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
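The removed pci_set_drvdata(pdev, NULL) calls here, and in the iwlegacy hunks further down, are dead code: the driver core now clears dev->driver_data itself when a device is unbound (a core change that landed around this series), so drivers no longer need to NULL it on error or remove paths. What remains is the usual pairing:

/* probe(): stash the driver state */
pci_set_drvdata(pdev, priv);

/* remove(): tear down and return; the core resets drvdata afterwards,
 * so an explicit pci_set_drvdata(pdev, NULL) adds nothing */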
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 6eede52ad8c0..5ce2f59d3378 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
}
/* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
-extern void libipw_networks_age(struct libipw_device *ieee,
- unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
/* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
- struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
/* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
- struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
- struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+ struct libipw_rx_stats *rx_stats);
/* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
- struct libipw_hdr_4addr *header,
- struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+ struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
- *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
- const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
- u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
- u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
- u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
- libipw_device
- *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
- u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+ u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
/* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+ struct iw_request_info *info, union iwreq_data *wrqu,
+ char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
static inline void libipw_increment_scans(struct libipw_device *ieee)
{
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 9581d07a4242..dea3b50d68b9 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3811,7 +3811,6 @@ out_iounmap:
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il_free_channel_map(il);
il_free_geos(il);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 9a8703def0ba..00030d43a194 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
* for use by iwl-*.c
*
*****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
- struct ieee80211_hdr *hdr,
- int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
- char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+ bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
/******************************************************************************
*
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
* il3945_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
- struct il_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset,
- u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
- struct il3945_frame *frame,
- u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame, u8 rate);
void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
/**
* il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
* not yet been merged into a single common layer for managing the
* station tables.
*/
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
/* scanning */
int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 5ab50a5b48b1..3982ab76f375 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6706,7 +6706,6 @@ out_free_eeprom:
out_iounmap:
iounmap(il->hw_base);
out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
il4965_uninit_drv(il);
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1b15b0b2292b..337dfcf3bbde 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
((t) < IL_TX_POWER_TEMPERATURE_MIN || \
(t) > IL_TX_POWER_TEMPERATURE_MAX)
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
/********************* END TEMPERATURE ***************************************/
/********************* START TXPOWER *****************************************/
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 83f8ed8a5528..ad123d66ab6c 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -858,9 +858,9 @@ struct il_hw_params {
* il4965_mac_ <-- mac80211 callback
*
****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
static inline int
il_queue_used(const struct il_queue *q, int i)
{
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
#else
static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
/*****************************************************
* TX
******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
/*****************************************************
* TX power
****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
return il_is_ready(il);
}
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
int il_apm_init(struct il_priv *il);
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
irqreturn_t il_isr(int irq, void *data);
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
* The specific throughput table used is based on the type of network
* the associated with, including A, B, G, and G w/ TGG protection
*/
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
/* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
- u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
* ieee80211_register_hw
*
*/
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
/**
* il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
extern u32 il_debug_level;
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index f2a86ffc3b4c..23d5f0275ce9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
return cpu_to_le32(flags|(u32)rate);
}
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a79fdd137f95..7434d9edf3b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
* iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
*
****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
#define IWL_OPERATION_MODE_AUTO 0
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 5d83cab22d62..26fc550cd68c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
/* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ u8 sta_id);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
* ieee80211_register_hw
*
*/
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da442b81370a..1fef5240e6ad 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
+ txq_id = info->hw_queue;
+
if (is_agg)
txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
/*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- txq_id = ctx->mcast_queue;
-
- /*
* The microcode will clear the more data
* bit in the last frame it transmits.
*/
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- txq_id = IWL_AUX_QUEUE;
- else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+ }
- WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
WARN_ON_ONCE(is_agg &&
priv->queue_to_mac80211[txq_id] != info->hw_queue);
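The tx.c hunk consolidates queue selection: the queue recorded by mac80211 in info->hw_queue becomes the default, and the driver only overrides it for aggregation, so the multicast, off-channel and per-AC fallbacks (and the WARN that cross-checked them against info->hw_queue) drop out. Reduced to its control flow, the new selection reads:

txq_id = info->hw_queue;			/* default: queue chosen via mac80211 */
if (is_agg)
	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);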
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 86270b69cd02..63637949a146 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -330,15 +330,14 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type old_type;
static const u8 alive_cmd[] = { REPLY_ALIVE };
- old_type = priv->cur_ucode;
- priv->cur_ucode = ucode_type;
fw = iwl_get_ucode_image(priv, ucode_type);
+ if (WARN_ON(!fw))
+ return -EINVAL;
+ old_type = priv->cur_ucode;
+ priv->cur_ucode = ucode_type;
priv->ucode_loaded = false;
- if (!fw)
- return -EINVAL;
-
iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
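The ucode.c reordering is a check-before-commit fix: the firmware image is looked up and validated before priv->cur_ucode and priv->ucode_loaded are touched, so a missing image can no longer leave the driver recording a uCode type it never loaded. In outline:

fw = iwl_get_ucode_image(priv, ucode_type);
if (WARN_ON(!fw))
	return -EINVAL;			/* bail out before mutating driver state */

old_type = priv->cur_ucode;		/* commit the switch only after validation */
priv->cur_ucode = ucode_type;
priv->ucode_loaded = false;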
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 76e14c046d94..85879dbaa402 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -83,6 +83,8 @@
#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3160_NVM_VERSION 0x709
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL7265_NVM_VERSION 0x0a1d
+#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@ const struct iwl_cfg iwl3160_n_cfg = {
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7265_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 7265",
+ .fw_name_pre = IWL7265_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7265_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b03c25e14903..18f232e8e812 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -293,6 +293,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
extern const struct iwl_cfg iwl3160_2n_cfg;
extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a276af476e2d..54a4fdc631b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -394,6 +394,38 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+/* SECURE boot registers */
+#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
+#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
+enum secure_boot_status_reg {
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
+enum secure_load_status_reg {
+ CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
+ CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
+#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+
+#define CSR_SECURE_TIME_OUT (100)
+
+#define FH_TCSR_0_REG0 (0x1D00)
+
/*
* HBUS (Host-side Bus)
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 99e1da3123c9..ff570027e9dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -483,6 +483,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const u8 *tlv_data;
char buildstr[25];
u32 build;
+ int num_of_cpus;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_SECURE_SEC_RT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_INIT:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_INIT].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+ iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+ tlv_len);
+ drv->fw.mvm_fw = true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
+ break;
+ case IWL_UCODE_TLV_NUM_OF_CPU:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ num_of_cpus =
+ le32_to_cpup((__le32 *)tlv_data);
+
+ if (num_of_cpus == 2) {
+ drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+ true;
+ drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+ true;
+ } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+				IWL_ERR(drv, "Driver supports up to 2 CPUs\n");
+ return -EINVAL;
+ }
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 8b6c6fd95ed0..6c6c35c5228c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -121,6 +121,10 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SEC_WOWLAN = 21,
IWL_UCODE_TLV_DEF_CALIB = 22,
IWL_UCODE_TLV_PHY_SKU = 23,
+ IWL_UCODE_TLV_SECURE_SEC_RT = 24,
+ IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
+ IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+ IWL_UCODE_TLV_NUM_OF_CPU = 27,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index a1223680bc70..75db087120c3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -75,11 +75,23 @@
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 blacklist entries instead of 64 in the
+ * scan offload profile config command.
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
+ * connection when going back to D0
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: supports the device-wide power command
+ * containing CAM (Continuous Active Mode) indication.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -87,11 +99,21 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
+ IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
+ IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
};
/* The default calibrate table size if not specified by firmware file */
@@ -133,7 +155,8 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 4
+#define IWL_UCODE_SECTION_MAX 6
+#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -150,6 +173,8 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+ bool is_secure;
+ bool is_dual_cpus;
};
/* uCode version contains 4 values: Major/Minor/API/Serial */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index dfa4d2e3aaa2..ad8e19a56eca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -34,7 +34,6 @@
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-fh.h"
-#include "iwl-csr.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ff8cc75c189d..a70c7b9d9bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,6 +97,8 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_RTC_INT_STT_RFKILL (0x10000000)
+
/* Device system time */
#define DEVICE_SYSTEM_TIME_REG 0xA0206C
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 80b47508647c..143292b4dbbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -344,7 +344,7 @@ struct iwl_trans_config {
u8 cmd_queue;
u8 cmd_fifo;
const u8 *no_reclaim_cmds;
- int n_no_reclaim_cmds;
+ unsigned int n_no_reclaim_cmds;
bool rx_buf_size_8k;
bool bc_table_dword;
@@ -601,7 +601,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
{
int ret;
- if (trans->state != IWL_TRANS_FW_ALIVE) {
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return -EIO;
}
@@ -640,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
@@ -649,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->reclaim(trans, queue, ssn, skbs);
}
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_disable(trans, queue);
}
@@ -669,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
{
might_sleep();
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
@@ -685,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
- WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
- "%s bad state = %d", __func__, trans->state);
+ if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+ IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->wait_tx_queue_empty(trans);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b85f60..5d066cbc5ac7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -98,126 +98,258 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#undef EVENT_PRIO_ANT
-/* BT Antenna Coupling Threshold (dB) */
-#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
-#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3)
-
#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
-#define BT_REDUCED_TX_POWER_BIT BIT(7)
-
-static inline bool is_loose_coex(void)
-{
- return iwlwifi_mod_params.ant_coupling >
- IWL_BT_ANTENNA_COUPLING_THRESHOLD;
-}
+#define BT_ANTENNA_COUPLING_THRESHOLD (30)
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
+
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
}
-static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
-{
- struct iwl_bt_coex_prot_env_cmd env_cmd;
- int ret;
-
- env_cmd.action = action;
- env_cmd.type = type;
- ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
- sizeof(env_cmd), &env_cmd);
- if (ret)
- IWL_ERR(mvm, "failed to send BT env command\n");
- return ret;
-}
-
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_SCO_HID_A2DP,
- BT_KILL_MSK_REDUCED_TXPOW,
- BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xffff0000,
[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
[BT_KILL_MSK_REDUCED_TXPOW] = 0,
};
-#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
-
-/* Tight Coex */
-static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+ cpu_to_le32(0xf0f0f0f0),
+ cpu_to_le32(0xc0c0c0c0),
+ cpu_to_le32(0xfcfcfcfc),
+ cpu_to_le32(0xff00ff00),
};
-/* Loose Coex */
-static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x40000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x44000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0xf0005000),
+ },
};
-/* Full concurrency */
-static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+ {
+ /* Tight */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Loose */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+ },
+ {
+ /* Tx Tx disabled */
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xC0004000),
+ cpu_to_le32(0xF0005000),
+ cpu_to_le32(0xF0005000),
+ },
};
-/* single shared antenna */
-static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
- cpu_to_le32(0xC0004000),
- cpu_to_le32(0xF0005000),
+/* 20MHz / 40MHz below / 40MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+ /* dummy entry for channel 0 */
+ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+ {
+ cpu_to_le64(0x0000001FFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000000FFFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000003FFFCULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ },
+ {
+ cpu_to_le64(0x00001FFFE0ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ },
+ {
+ cpu_to_le64(0x00007FFF80ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ },
+ {
+ cpu_to_le64(0x0003FFFC00ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ },
+ {
+ cpu_to_le64(0x000FFFF000ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ },
+ {
+ cpu_to_le64(0x007FFF8000ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ },
+ {
+ cpu_to_le64(0x01FFFE0000ULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ },
+ {
+ cpu_to_le64(0x0FFFF00000ULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ cpu_to_le64(0x0ULL),
+ },
+ {
+ cpu_to_le64(0x3FFFC00000ULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFFE000000ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFF8000000ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0)
+ },
};
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+ cpu_to_le32(0x22002200),
+ cpu_to_le32(0x33113311),
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum iwl_bt_coex_lut_type ret;
+ u16 phy_ctx_id;
+
+ /*
+ * Checking that we hold mvm->mutex is a good idea, but the rate
+ * control can't acquire the mutex since it runs in Tx path.
+ * So this is racy in that case, but in the worst case, the AMPDU
+ * size limit will be wrong for a short time which is not a big
+ * issue.
+ */
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return BT_COEX_LOOSE_LUT;
+ }
+
+ ret = BT_COEX_TX_DIS_LUT;
+
+ if (mvm->cfg->bt_shared_single_ant) {
+ rcu_read_unlock();
+ return ret;
+ }
+
+ phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+ if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+ else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+ /* else - default = TX TX disallowed */
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
.flags = CMD_SYNC,
};
int ret;
+ u32 flags;
- /* go to CALIB state in internal BT-Coex state machine */
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
-
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -246,40 +371,52 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt3_time_t7_value = 1;
- bt_cmd->bt3_prio_sample_time = 2;
- bt_cmd->bt3_timer_t2_value = 0xc;
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ bt_cmd->bt4_tx_rx_max_freq0 = 15;
- bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
+ flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+ flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
+ bt_cmd->flags = cpu_to_le32(flags);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
BT_VALID_BT_PRIO_BOOST |
BT_VALID_MAX_KILL |
BT_VALID_3W_TMRS |
BT_VALID_KILL_ACK |
BT_VALID_KILL_CTS |
BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT);
+ BT_VALID_LUT |
+ BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+ BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+ BT_VALID_MULTI_PRIO_LUT |
+ BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40 |
+ BT_VALID_ANT_ISOLATION |
+ BT_VALID_ANT_ISOLATION_THRS |
+ BT_VALID_TXTX_DELTA_FREQ_THRS |
+ BT_VALID_TXRX_MAX_FREQ_0);
if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
- sizeof(iwl_single_shared_ant_lookup));
- else if (is_loose_coex())
- memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+ sizeof(iwl_single_shared_ant));
else
- memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
- sizeof(iwl_tight_lookup));
+ memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+ sizeof(iwl_combined_lookup));
- bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+ sizeof(iwl_bt_prio_boost));
+ memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+ sizeof(iwl_bt_mprio_lut));
bt_cmd->kill_ack_msk =
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
bt_cmd->kill_cts_msk =
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -334,13 +471,17 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
- bt_cmd->valid_bit_msk =
- cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS);
- IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
+ IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+ iwl_bt_ack_kill_msk[bt_kill_msk],
+ iwl_bt_cts_kill_msk[bt_kill_msk]);
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -364,12 +505,16 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
int ret;
- /* This can happen if the station has been removed right now */
if (sta_id == IWL_MVM_STATION_COUNT)
return 0;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return 0;
+
mvmsta = (void *)sta->drv_priv;
/* nothing to do */
@@ -380,8 +525,10 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (!bt_cmd)
return -ENOMEM;
cmd.data[0] = bt_cmd;
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
- bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+ bt_cmd->valid_bit_msk =
+ cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
bt_cmd->bt_reduced_tx_power = sta_id;
if (enable)
@@ -403,8 +550,25 @@ struct iwl_bt_iterator_data {
struct iwl_mvm *mvm;
u32 num_bss_ifaces;
bool reduced_tx_power;
+ struct ieee80211_chanctx_conf *primary;
+ struct ieee80211_chanctx_conf *secondary;
};
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, int rssi)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->bf_data.last_bt_coex_event = rssi;
+ mvmvif->bf_data.bt_coex_max_thold =
+ enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+ mvmvif->bf_data.bt_coex_min_thold =
+ enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -413,65 +577,94 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
- enum ieee80211_band band;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf && chanctx_conf->def.chan)
- band = chanctx_conf->def.chan->band;
- else
- band = -1;
- rcu_read_unlock();
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- /* non associated BSSes aren't to be considered */
- if (!vif->bss_conf.assoc)
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ /* If channel context is invalid or not on 2.4GHz .. */
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+ /* ... and it is an associated STATION, relax constraints */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
+
+ /* SoftAP / GO will always be primary */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- if (band != IEEE80211_BAND_2GHZ) {
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+
+ if (chanctx_conf == data->primary)
+ return;
+
+ /* downgrade the current primary no matter what its type is */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
return;
}
- if (data->notif->bt_status)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
+ data->num_bss_ifaces++;
+
+ /* we are now a STA / P2P Client, and only consider associated ones */
+ if (!vif->bss_conf.assoc)
+ return;
+
+ /* STA / P2P Client, try to be primary if first vif */
+ if (!data->primary || data->primary == chanctx_conf)
+ data->primary = chanctx_conf;
+ else if (!data->secondary)
+ /* if secondary is not NULL, it might be a GO */
+ data->secondary = chanctx_conf;
- if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
+ if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
smps_mode = IEEE80211_SMPS_STATIC;
+ else if (le32_to_cpu(data->notif->bt_activity_grading) >=
+ BT_LOW_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status,
- data->notif->bt_traffic_load, smps_mode);
+ data->notif->bt_activity_grading, smps_mode);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
/* don't reduce the Tx power if in loose scheme */
- if (is_loose_coex())
+ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+ mvm->cfg->bt_shared_single_ant) {
+ data->reduced_tx_power = false;
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
+ }
- data->num_bss_ifaces++;
-
- /* reduced Txpower only if there are open BT connections, so ...*/
- if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
+ /* reduced Tx power only if BT is on, so ... */
+ if (!data->notif->bt_status) {
/* ... cancel reduced Tx power ... */
if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
data->reduced_tx_power = false;
/* ... and there is no need to get reports on RSSI any more. */
- ieee80211_disable_rssi_reports(vif);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- ave_rssi = ieee80211_ave_rssi(vif);
+ /* try to get the avg rssi from fw */
+ ave_rssi = mvmvif->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -499,8 +692,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
- BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
}
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +702,72 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
.notif = &mvm->last_bt_notif,
.reduced_tx_power = true,
};
+ struct iwl_bt_coex_ci_cmd cmd = {};
+ u8 ci_bw_idx;
+ rcu_read_lock();
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (data.primary) {
+ struct ieee80211_chanctx_conf *chan = data.primary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_primary = 0;
+ } else {
+ cmd.co_run_bw_primary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_primary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+ }
+
+ if (data.secondary) {
+ struct ieee80211_chanctx_conf *chan = data.secondary;
+ if (WARN_ON(!data.secondary->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ cmd.co_run_bw_secondary = 0;
+ } else {
+ cmd.co_run_bw_secondary = 1;
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_secondary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+ }
+
+ rcu_read_unlock();
+
+ /* Don't spam the fw with the same command over and over */
+ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send BT_CI cmd");
+ memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+ }
+
/*
* If there are no BSS / P2P client interfaces, reduced Tx Power is
* irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +789,18 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+ IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+ notif->bt_status ? "ON" : "OFF");
IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
- IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
notif->bt_agg_traffic_load);
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
/* remember this notification for future use: rssi fluctuations */
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +824,18 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ /* If channel context is invalid or not on 2.4GHz - don't count it */
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
if (vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
@@ -594,15 +865,15 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
};
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
/* Rssi update while not associated ?! */
if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
- goto out_unlock;
+ return;
- /* No open connection - reports should be disabled */
- if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
- goto out_unlock;
+ /* No BT - reports should be disabled */
+ if (!mvm->last_bt_notif.bt_status)
+ return;
IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +882,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
- if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
+ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
false);
else
@@ -633,12 +905,52 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
- out_unlock:
- mutex_unlock(&mvm->mutex);
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
+
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_LOW_TRAFFIC)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+ if (lut_type == BT_COEX_LOOSE_LUT)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ /* tight coex, high bt traffic, reduce AGG time limit */
+ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
}
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return true;
+
+ /*
+ * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+ * already killed.
+ * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+ * Tx.
+ */
+ return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
+}
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+ return;
+
iwl_mvm_bt_coex_notif_handle(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 2bf29f7992ee..4b6d670c3509 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -70,7 +70,9 @@
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
-#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 417639f77b01..6f45966817bb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -67,6 +67,7 @@
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
@@ -381,14 +382,74 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ if (mvmvif->num_target_ipv6_addrs)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+ /*
+ * For each address we have (and that will fit) fill a target
+ * address struct and combine for NS offload structs with the
+ * solicited node addresses.
+ */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+ else
+ cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
}
#endif
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
@@ -438,8 +505,8 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
common->enabled = cpu_to_le32(enabled);
- return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
- size, &cmd);
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
}
enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NON_QOS_TX_COUNTER_CMD,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ int err;
+ u32 size;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
+ }
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ if (err)
+ return err;
+
+ size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ size -= sizeof(cmd.resp_pkt->hdr);
+ if (size < sizeof(__le16)) {
+ err = -EINVAL;
+ } else {
+ err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+ /* new API returns next, not last-used seqno */
+ if (mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ err -= 0x10;
+ }
+
+ iwl_free_resp(&cmd);
+ return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .value = cpu_to_le16(mvmvif->seqno),
+ };
+
+ /* return if called during restart, not resume from D3 */
+ if (!mvmvif->seqno_valid)
+ return;
+
+ mvmvif->seqno_valid = false;
+
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
+ return;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+ sizeof(query_cmd), &query_cmd))
+ IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -829,7 +964,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
- u16 seq;
u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
@@ -872,26 +1006,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
- /*
- * We know the last used seqno, and the uCode expects to know that
- * one, it will increment before TX.
- */
- seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
- wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+ /* Query the last used seqno and set it */
+ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+ if (ret < 0)
+ goto out_noreset;
+ wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- seq = mvm_ap_sta->tid_data[i].seq_number;
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
}
@@ -945,6 +1068,16 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
+ * The D3 firmware still hardcodes the AP station ID for the
+ * BSS we're associated with as 0. Store the real STA ID here
+ * and assign 0. When we leave this function, we'll restore
+ * the original value for the resume code.
+ */
+ old_ap_sta_id = mvm_ap_sta->sta_id;
+ mvm_ap_sta->sta_id = 0;
+ mvmvif->ap_sta_id = 0;
+
+ /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1059,6 +1192,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto out;
+
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
goto out;
@@ -1109,16 +1246,26 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+ u16 pattern_number;
+ u16 qos_seq_ctr[8];
+ u32 wakeup_reasons;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ const u8 *wake_packet;
+};
+
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status *status)
+ struct iwl_wowlan_status_data *status)
{
struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- u32 reasons = le32_to_cpu(status->wakeup_reasons);
+ u32 reasons = status->wakeup_reasons;
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@@ -1130,7 +1277,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
wakeup.pattern_idx =
- le16_to_cpu(status->pattern_number);
+ status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
wakeup.tcp_match = true;
if (status->wake_packet_bufsize) {
- int pktsize = le32_to_cpu(status->wake_packet_bufsize);
- int pktlen = le32_to_cpu(status->wake_packet_length);
+ int pktsize = status->wake_packet_bufsize;
+ int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
struct ieee80211_hdr *hdr = (void *)pktdata;
int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
kfree_skb(pkt);
}
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ u64 pn;
+
+ pn = le64_to_cpu(sc->pn);
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+ seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_v6 *status)
+{
+ union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+ struct iwl_wowlan_status_v6 *status;
+ void *last_gtk;
+ u32 cipher;
+ bool find_phase, unhandled_cipher;
+ int num_keys;
+};
+
+static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we support these */
+ break;
+ default:
+ /* everything else (even CMAC for MFP) - disconnect from AP */
+ data->unhandled_cipher = true;
+ return;
+ }
+
+ data->num_keys++;
+
+ /*
+ * pairwise key - update sequence counters only;
+ * note that this assumes no TDLS sessions are active
+ */
+ if (sta) {
+ struct ieee80211_key_seq seq = {};
+ union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+ if (data->find_phase)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
+ iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+ iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+ break;
+ }
+ ieee80211_set_key_tx_seq(key, &seq);
+
+ /* that's it for this key */
+ return;
+ }
+
+ if (data->find_phase) {
+ data->last_gtk = key;
+ data->cipher = key->cipher;
+ return;
+ }
+
+ if (data->status->num_of_gtk_rekeys)
+ ieee80211_remove_key(key);
+ else if (data->last_gtk == key)
+ iwl_mvm_set_key_rx_seq(key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_v6 *status)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .status = status,
+ };
+
+ if (!status || !vif->bss_conf.bssid)
+ return false;
+
+ /* find last GTK that we used initially, if any */
+ gtkdata.find_phase = true;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+ /* not trying to keep connections with MFP/unhandled ciphers */
+ if (gtkdata.unhandled_cipher)
+ return false;
+ if (!gtkdata.num_keys)
+ return true;
+ if (!gtkdata.last_gtk)
+ return false;
+
+ /*
+ * invalidate all other GTKs that might still exist and update
+ * the one that we used
+ */
+ gtkdata.find_phase = false;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_gtks, &gtkdata);
+
+ if (status->num_of_gtk_rekeys) {
+ struct ieee80211_key_conf *key;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {
+ .conf.cipher = gtkdata.cipher,
+ .conf.keyidx = status->gtk.key_index,
+ };
+
+ switch (gtkdata.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key,
+ WLAN_KEY_LEN_CCMP);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+ /* leave TX MIC key zeroed, we don't use it anyway */
+ memcpy(conf.conf.key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ status->gtk.tkip_mic_key, 8);
+ break;
+ }
+
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ if (IS_ERR(key))
+ return false;
+ iwl_mvm_set_key_rx_seq(key, status);
+ }
+
+ if (status->num_of_gtk_rekeys) {
+ __be64 replay_ctr =
+ cpu_to_be64(le64_to_cpu(status->replay_ctr));
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ }
+
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+ return true;
+}
+
/* releases the MVM mutex */
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
- struct iwl_wowlan_status *status;
- int ret, len;
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status_v6 *status_v6;
+ int ret, len, status_size, i;
+ bool keep;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+ status_size = sizeof(struct iwl_wowlan_status_v6);
+ else
+ status_size = sizeof(struct iwl_wowlan_status_v4);
+
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ if (len - sizeof(struct iwl_cmd_header) < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
- status = (void *)cmd.resp_pkt->data;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+ status_v6 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v6->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v6->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v6->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v6->wake_packet_bufsize);
+ status.wake_packet = status_v6->wake_packet;
+ } else {
+ struct iwl_wowlan_status_v4 *status_v4;
+ status_v6 = NULL;
+ status_v4 = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(status_v4->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(status_v4->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(status_v4->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(status_v4->wake_packet_bufsize);
+ status.wake_packet = status_v4->wake_packet;
+ }
if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+ status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
+ /* still at hard-coded place 0 for D3 image */
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[0],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out_free_resp;
+
+ mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = status.qos_seq_ctr[i];
+ /* firmware stores last-used value, we store next value */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+
/* now we have all the data we need, unlock to avoid mac80211 issues */
mutex_unlock(&mvm->mutex);
- iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+ iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+
iwl_free_resp(&cmd);
- return;
+ return keep;
out_free_resp:
iwl_free_resp(&cmd);
out_unlock:
mutex_unlock(&mvm->mutex);
+ return false;
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@ static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
#endif
}
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep the connection on */
+ if (data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_resume_disconnect(vif);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
struct ieee80211_vif *vif = NULL;
int ret;
enum iwl_d3_status d3_status;
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -1368,7 +1803,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
- iwl_mvm_query_wakeup_reasons(mvm, vif);
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -1376,8 +1811,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_unlock(&mvm->mutex);
out:
- if (!test && vif)
- ieee80211_resume_disconnect(vif);
+ if (!test)
+ ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index aac81b8984b0..9864d713eb2c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -246,58 +246,56 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
- const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+ char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
-
- if (!mvm->ucode_loaded)
- return -EIO;
-
- if (copy_from_user(buf, user_buf, sizeof(buf)))
- return -EFAULT;
-
- if (sscanf(buf, "%d", &allow) != 1)
- return -EINVAL;
-
- IWL_DEBUG_POWER(mvm, "%s device power down\n",
- allow ? "allow" : "prevent");
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos = 0;
- /*
- * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
- */
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+ mvm->disable_power_off);
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+ mvm->disable_power_off_d3);
- return count;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- char buf[8] = {};
- int allow;
+ char buf[64] = {};
+ int ret;
+ int val;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ if (!mvm->ucode_loaded)
+ return -EIO;
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
- if (sscanf(buf, "%d", &allow) != 1)
+ if (!strncmp("disable_power_off_d0=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off = val;
+ } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off_d3 = val;
+ } else {
return -EINVAL;
+ }
- IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
- allow ? "allow" : "prevent");
-
- /*
- * TODO: When WoWLAN FW alive notification happens, driver will send
- * REPLY_DEBUG_CMD setting power_down_allow flag according to
- * mvm->prevent_power_down_d3
- */
- mvm->prevent_power_down_d3 = !allow;
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ mutex_unlock(&mvm->mutex);
- return count;
+ return ret ?: count;
}
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -344,6 +342,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
dbgfs_pm->disable_power_off = val;
+ break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@@ -371,7 +370,8 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
int val;
int ret;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +394,9 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18)) {
+ } else if (!strncmp("disable_power_off=", buf, 18) &&
+ !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
if (sscanf(buf + 18, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +583,21 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
- notif->bt_status);
+ notif->bt_status);
pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
- notif->bt_open_conn);
+ notif->bt_open_conn);
pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
- notif->bt_traffic_load);
+ notif->bt_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
- notif->bt_agg_traffic_load);
+ notif->bt_agg_traffic_load);
pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
mutex_unlock(&mvm->mutex);
@@ -600,6 +608,38 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+ char buf[256];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_primary_ci),
+ !!cmd->co_run_bw_primary);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
+ le64_to_cpu(cmd->bt_secondary_ci),
+ !!cmd->co_run_bw_secondary);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+ pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+ iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+ pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+ iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@@ -615,9 +655,11 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
int pos = 0;
char *buf;
int ret;
- int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
- sizeof(struct mvm_statistics_rx_non_phy) * 10 +
- sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+ /* 43 is the size of each data line, 33 is the size of each header */
+ size_t bufsz =
+ ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
+ (4 * 33) + 1;
+
struct mvm_statistics_rx_phy *ofdm;
struct mvm_statistics_rx_phy *cck;
struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +754,7 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+ PRINT_STATS_LE32("mac_id", general->mac_id);
PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +800,59 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
return count;
}
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ /* print which antennas were set for the scan command by the user */
+ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+ if (mvm->scan_rx_ant & ANT_A)
+ pos += scnprintf(buf + pos, bufsz - pos, "A");
+ if (mvm->scan_rx_ant & ANT_B)
+ pos += scnprintf(buf + pos, bufsz - pos, "B");
+ if (mvm->scan_rx_ant & ANT_C)
+ pos += scnprintf(buf + pos, bufsz - pos, "C");
+ pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[8];
+ int buf_size;
+ u8 scan_rx_ant;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+
+ /* get the argument from the user and check if it is valid */
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+ return -EINVAL;
+ if (scan_rx_ant > ANT_ABC)
+ return -EINVAL;
+ if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+ return -EINVAL;
+
+ /* change the rx antennas for scan command */
+ mvm->scan_rx_ant = scan_rx_ant;
+
+ return count;
+}
+
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -968,7 +1064,8 @@ static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
char buf[8] = {};
int store;
- if (copy_from_user(buf, user_buf, sizeof(buf)))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1160,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
#endif
@@ -1087,10 +1186,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
- MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
+ S_IWUSR | S_IRUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
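As a quick illustration of the scan_ant_rxchain debugfs entry added above: the value written is a hex bitmask of RX antennas. A minimal sketch, assuming the usual iwlwifi antenna bits (ANT_A = BIT(0), ANT_B = BIT(1), ANT_C = BIT(2)); those defines come from the shared headers, not from this patch:

	/* Illustrative only: a mask selecting antennas A and B for scan RX.
	 * Writing "3" (hex) to scan_ant_rxchain has the same effect. */
	u8 scan_rx_ant = ANT_A | ANT_B;		/* 0x1 | 0x2 == 0x3 */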
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 05c61d6f384e..4ea5e24ca92d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -82,6 +82,8 @@
* @BT_USE_DEFAULTS:
* @BT_SYNC_2_BT_DISABLE:
* @BT_COEX_CORUNNING_TBL_EN:
+ *
+ * The COEX_MODE must be set for each command, even if it is not changed.
*/
enum iwl_bt_coex_flags {
BT_CH_PRIMARY_EN = BIT(0),
@@ -95,14 +97,16 @@ enum iwl_bt_coex_flags {
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_USE_DEFAULTS = BIT(6),
BT_SYNC_2_BT_DISABLE = BIT(7),
- /*
- * For future use - when the flags will be enlarged
- * BT_COEX_CORUNNING_TBL_EN = BIT(8),
- */
+ BT_COEX_CORUNNING_TBL_EN = BIT(8),
+ BT_COEX_MPLUT_TBL_EN = BIT(9),
+ /* Bit 10 is reserved */
+ BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
};
/*
* indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware.
*/
enum iwl_bt_coex_valid_bit_msk {
BT_VALID_ENABLE = BIT(0),
@@ -121,11 +125,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_CORUN_LUT_40 = BIT(13),
BT_VALID_ANT_ISOLATION = BIT(14),
BT_VALID_ANT_ISOLATION_THRS = BIT(15),
- /*
- * For future use - when the valid flags will be enlarged
- * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
- * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
- */
+ BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+ BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
};
/**
@@ -142,48 +143,88 @@ enum iwl_bt_reduced_tx_power {
BT_REDUCED_TX_POWER_DATA = BIT(1),
};
+enum iwl_bt_coex_lut_type {
+ BT_COEX_TIGHT_LUT = 0,
+ BT_COEX_LOOSE_LUT,
+ BT_COEX_TX_DIS_LUT,
+
+ BT_COEX_MAX_LUT,
+};
+
#define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
/**
* struct iwl_bt_coex_cmd - bt coex configuration command
* @flags:&enum iwl_bt_coex_flags
- * @lead_time:
* @max_kill:
- * @bt3_time_t7_value:
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @bt3_prio_sample_time:
- * @bt3_timer_t2_value:
- * @bt4_reaction_time:
- * @decision_lut[12]:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- * @bt_prio_boost: values for PTA boost register
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
*/
struct iwl_bt_coex_cmd {
- u8 flags;
- u8 lead_time;
+ __le32 flags;
u8 max_kill;
- u8 bt3_time_t7_value;
+ u8 bt_reduced_tx_power;
+ u8 reserved[2];
+
+ u8 bt4_antenna_isolation;
+ u8 bt4_antenna_isolation_thr;
+ u8 bt4_tx_tx_delta_freq_thr;
+ u8 bt4_tx_rx_max_freq0;
+
+ __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+ __le32 wifi_tx_prio_boost;
+ __le32 wifi_rx_prio_boost;
__le32 kill_ack_msk;
__le32 kill_cts_msk;
- u8 bt3_prio_sample_time;
- u8 bt3_timer_t2_value;
- __le16 bt4_reaction_time;
- __le32 decision_lut[BT_COEX_LUT_SIZE];
- u8 bt_reduced_tx_power;
- u8 reserved;
- __le16 valid_bit_msk;
- __le32 bt_prio_boost;
- u8 reserved2;
- u8 wifi_tx_prio_boost;
- __le16 wifi_rx_prio_boost;
+
+ __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+ __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+ __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+ __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+ __le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+ __le64 bt_primary_ci;
+ __le64 bt_secondary_ci;
+
+ u8 co_run_bw_primary;
+ u8 co_run_bw_secondary;
+ u8 primary_ch_phy_id;
+ u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@ enum iwl_bt_mxbox_dw3 {
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
+enum iwl_bt_activity_grading {
+ BT_OFF = 0,
+ BT_ON_NO_CONNECTION = 1,
+ BT_LOW_TRAFFIC = 2,
+ BT_HIGH_TRAFFIC = 3,
+};
+
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
* @mbox_msg: message from BT to WiFi
- * @:bt_status: 0 - off, 1 - on
- * @:bt_open_conn: number of BT connections open
- * @:bt_traffic_load: load of BT traffic
- * @:bt_agg_traffic_load: aggregated load of BT traffic
- * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT, see enum %iwl_bt_activity_grading
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
+ __le32 msg_idx;
u8 bt_status;
u8 bt_open_conn;
u8 bt_traffic_load;
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 reserved[3];
+
+ __le32 primary_ch_lut;
+ __le32 secondary_ch_lut;
+ __le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@ struct iwl_bt_coex_prio_tbl_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
-enum iwl_bt_coex_env_action {
- BT_COEX_ENV_CLOSE = 0,
- BT_COEX_ENV_OPEN = 1,
-}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
-
-/**
- * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
- * @action: enum %iwl_bt_coex_env_action
- * @type: enum %iwl_bt_coex_prio_table_event
- */
-struct iwl_bt_coex_prot_env_cmd {
- u8 action; /* 0 = closed, 1 = open */
- u8 type; /* 0 .. 15 */
- u8 reserved[2];
-} __packed;
-
#endif /* __fw_api_bt_coex_h__ */
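A minimal sketch of how the new bt_activity_grading field might be consumed; the real consumer is the coex code in bt-coex.c, which is not part of this hunk, and the helper name here is hypothetical:

	static bool example_bt_is_busy(const struct iwl_bt_coex_profile_notif *notif)
	{
		/* LOW or HIGH traffic counts as "busy" in this illustration */
		return le32_to_cpu(notif->bt_activity_grading) >= BT_LOW_TRAFFIC;
	}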
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index df72fcdf8170..4e7dd8cf87dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -100,7 +100,12 @@ enum iwl_proto_offloads {
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
/**
* struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@ struct iwl_proto_offload_cmd_v2 {
u8 reserved2[3];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+struct iwl_ns_config {
+ struct in6_addr source_ipv6_addr;
+ struct in6_addr dest_ipv6_addr;
+ u8 target_mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+ struct in6_addr addr;
+ __le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
/*
* WOWLAN_PATTERNS
@@ -293,7 +335,7 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v4 {
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+struct iwl_wowlan_gtk_status {
+ u8 key_index;
+ u8 reserved[3];
+ u8 decrypt_key[16];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed;
+
+struct iwl_wowlan_status_v6 {
+ struct iwl_wowlan_gtk_status gtk;
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 98b1feb43d38..39c3148bdfa8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -170,12 +170,14 @@ struct iwl_mac_data_ap {
* @beacon_tsf: beacon transmit time in TSF
* @bi: beacon interval in TU
* @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
*/
struct iwl_mac_data_ibss {
__le32 beacon_time;
__le64 beacon_tsf;
__le32 bi;
__le32 bi_reciprocal;
+ __le32 beacon_template;
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
/**
@@ -372,4 +374,13 @@ static inline u32 iwl_mvm_reciprocal(u32 v)
return 0xFFFFFFFF / v;
}
+#define IWL_NONQOS_SEQ_GET 0x1
+#define IWL_NONQOS_SEQ_SET 0x2
+struct iwl_nonqos_seq_query_cmd {
+ __le32 get_set_flag;
+ __le32 mac_id_n_color;
+ __le16 value;
+ __le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
#endif /* __fw_api_mac_h__ */
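A worked example for the new beacon_template and existing reciprocal fields of the IBSS MAC data (values illustrative; cmd and mvmvif are as in the mac-ctxt.c hunk later in this patch):

	/* beacon interval of 100 TU (a common default) */
	u32 bi = 100;
	u32 bi_recip = iwl_mvm_reciprocal(bi);	/* 0xFFFFFFFF / 100 = 42949672 */

	cmd.ibss.bi = cpu_to_le32(bi);
	cmd.ibss.bi_reciprocal = cpu_to_le32(bi_recip);
	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);	/* template id == MAC id */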
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 8e7ab41079ca..5cb93ae5cd2f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -132,6 +132,33 @@ struct iwl_powertable_cmd {
} __packed;
/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' - allow power saving by turning off
+ * the receiver and transmitter; '0' - do not allow. This flag should
+ * always be set to '1' unless one needs to disable actual power down for
+ * debug purposes.
+ * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
+ * that power management is disabled. '0' - power management is enabled and
+ * one of the power schemes is applied.
+*/
+enum iwl_device_power_flags {
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_CAM_MSK = BIT(13),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ __le16 reserved;
+} __packed;
+
+/**
* struct iwl_mac_power_cmd - New power command containing uAPSD support
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
* @id_and_color: MAC contex identifier
@@ -290,7 +317,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
-#define IWL_BA_ESCAPE_TIMER_D3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
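A minimal sketch of the new DEVICE_POWER_CMD payload, assuming the common case where power saving is allowed; the code that actually builds and sends it lives in power.c, which is not shown here:

	struct iwl_device_power_cmd cmd = {
		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
	};
	/* clearing the bit (e.g. when disable_power_off is set via debugfs)
	 * keeps the radio powered, for debug purposes only */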
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index fdd33bc0a594..538f1c7a5966 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -68,6 +68,7 @@
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
*/
enum {
IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@ enum {
IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
IWL_RATE_6M_INDEX,
IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+ IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+ IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX,
+ IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
IWL_RATE_18M_INDEX,
+ IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX,
+ IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
IWL_RATE_36M_INDEX,
+ IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX,
+ IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
IWL_RATE_54M_INDEX,
+ IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
IWL_RATE_60M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+ IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+ IWL_RATE_MCS_8_INDEX,
+ IWL_RATE_MCS_9_INDEX,
+ IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT,
+ IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@ enum {
IWL_RATE_2M_PLCP = 20,
IWL_RATE_5M_PLCP = 55,
IWL_RATE_11M_PLCP = 110,
+ IWL_RATE_INVM_PLCP = -1,
};
/*
@@ -164,6 +179,8 @@ enum {
* which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
*/
#define RATE_HT_MCS_RATE_CODE_MSK 0x7
+#define RATE_HT_MCS_NSS_POS 3
+#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
/* Bit 10: (1) Use Green Field preamble */
#define RATE_HT_MCS_GF_POS 10
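A small sketch showing how the new NSS mask combines with the existing rate-code mask when decoding an HT rate_n_flags value; the exact meaning of the NSS field is firmware-defined, so this only extracts the raw fields:

	static inline void example_decode_ht(u32 rate_n_flags, u8 *code, u8 *nss)
	{
		*code = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;	/* bits 0-2 */
		*nss = (rate_n_flags & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS;
	}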
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 83cb9b992ea4..c3782b48ded1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -356,6 +356,7 @@ struct iwl_scan_complete_notif {
/* scan offload */
#define IWL_MAX_SCAN_CHANNELS 40
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
#define IWL_SCAN_MAX_PROFILES 11
#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
@@ -368,6 +369,12 @@ struct iwl_scan_complete_notif {
#define IWL_FULL_SCAN_MULTIPLIER 5
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+enum scan_framework_client {
+ SCAN_CLIENT_SCHED_SCAN = BIT(0),
+ SCAN_CLIENT_NETDETECT = BIT(1),
+ SCAN_CLIENT_ASSET_TRACKING = BIT(2),
+};
+
/**
* struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
* @scan_flags: see enum iwl_scan_flags
@@ -449,11 +456,12 @@ struct iwl_scan_offload_cfg {
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
* @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
*/
struct iwl_scan_offload_blacklist {
u8 ssid[ETH_ALEN];
u8 reported_rssi;
- u8 reserved;
+ u8 client_bitmap;
} __packed;
enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@ enum iwl_scan_offload_band_selection {
 * @auth_alg: authentication algorithm to match - bitmap
* @network_type: enum iwl_scan_offload_network_type
* @band_selection: enum iwl_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
*/
struct iwl_scan_offload_profile {
u8 ssid_index;
@@ -482,7 +491,8 @@ struct iwl_scan_offload_profile {
u8 auth_alg;
u8 network_type;
u8 band_selection;
- u8 reserved[3];
+ u8 client_bitmap;
+ u8 reserved[2];
} __packed;
/**
@@ -491,13 +501,18 @@ struct iwl_scan_offload_profile {
* @profiles: profiles to search for match
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
*/
struct iwl_scan_offload_profile_cfg {
- struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
u8 blacklist_len;
u8 num_profiles;
- u8 reserved[2];
+ u8 match_notify;
+ u8 pass_match;
+ u8 active_clients;
+ u8 reserved[3];
} __packed;
/**
@@ -560,4 +575,15 @@ struct iwl_scan_offload_complete {
u8 reserved;
} __packed;
+/**
+ * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap: SSIDs indexes found in this iteration
+ * @client_bitmap: clients that are active and wait for this notification
+ */
+struct iwl_sched_scan_results {
+ __le16 ssid_bitmap;
+ u8 client_bitmap;
+ u8 reserved;
+};
+
#endif
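A sketch of how the new client bitmaps could be filled when building the profile configuration; the scan offload code that really does this is in scan.c, outside this hunk:

	struct iwl_scan_offload_profile_cfg cfg = {};

	cfg.num_profiles = 1;
	cfg.profiles[0].client_bitmap = SCAN_CLIENT_SCHED_SCAN;
	/* only the scheduled-scan client cares about match notifications here */
	cfg.match_notify = SCAN_CLIENT_SCHED_SCAN;
	cfg.active_clients = SCAN_CLIENT_SCHED_SCAN;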
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index a30691a8a85b..4aca5933a65d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -247,7 +247,7 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
* @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd {
+struct iwl_mvm_add_sta_cmd_v5 {
u8 add_modify;
u8 unicast_tx_key_id;
u8 multicast_tx_key_id;
@@ -313,6 +313,57 @@ struct iwl_mvm_add_sta_cmd {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
+ * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5 except
+ * exclusion of all fields related to the security key installation.
+ */
+struct iwl_mvm_add_sta_cmd_v6 {
+ u8 add_modify;
+ u8 reserved1;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwl_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ u8 sta_id;
+ u8 key_offset;
+ __le16 key_flags;
+ u8 key[16];
+ u8 key2[16];
+ u8 rx_secur_seq_cnt[16];
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved;
+ __le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully
* @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 66264cc5a016..bad5a552dd8d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -72,17 +72,17 @@
#include "fw-api-d3.h"
#include "fw-api-bt-coex.h"
-/* queue and FIFO numbers by usage */
+/* maximal number of Tx queues in any platform */
+#define IWL_MVM_MAX_QUEUES 20
+
+/* Tx queue numbers */
enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9,
- IWL_MVM_AUX_QUEUE = 15,
- IWL_MVM_FIRST_AGG_QUEUE = 16,
- IWL_MVM_NUM_QUEUES = 20,
- IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
- IWL_MVM_CMD_FIFO = 7
};
+#define IWL_MVM_CMD_FIFO 7
+
#define IWL_MVM_STATION_COUNT 16
/* commands */
@@ -97,6 +97,7 @@ enum {
DBG_CFG = 0x9,
/* station table */
+ ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
REMOVE_STA = 0x19,
@@ -114,6 +115,7 @@ enum {
TIME_EVENT_NOTIFICATION = 0x2a,
BINDING_CONTEXT_CMD = 0x2b,
TIME_QUOTA_CMD = 0x2c,
+ NON_QOS_TX_COUNTER_CMD = 0x2d,
LQ_CMD = 0x4e,
@@ -130,6 +132,7 @@ enum {
SCAN_OFFLOAD_COMPLETE = 0x6D,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+ MATCH_FOUND_NOTIFICATION = 0xd9,
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@ enum {
BT_COEX_PRIO_TABLE = 0xcc,
BT_COEX_PROT_ENV = 0xcd,
BT_PROFILE_NOTIFICATION = 0xce,
+ BT_COEX_CI = 0x5d,
REPLY_BEACON_FILTERING_CMD = 0xd2,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c76299a3a1e0..70e5297646b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -151,13 +151,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type old_type = mvm->cur_ucode;
static const u8 alive_cmd[] = { MVM_ALIVE };
- mvm->cur_ucode = ucode_type;
fw = iwl_get_ucode_image(mvm, ucode_type);
-
- mvm->ucode_loaded = false;
-
- if (!fw)
+ if (WARN_ON(!fw))
return -EINVAL;
+ mvm->cur_ucode = ucode_type;
+ mvm->ucode_loaded = false;
iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
@@ -199,7 +197,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
- if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+ if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
mvm->queue_to_mac80211[i] = i;
else
mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->init_ucode_run)
+ if (mvm->init_ucode_complete)
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +262,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (ret)
goto error;
+ /* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
ret = iwl_nvm_init(mvm);
@@ -273,6 +272,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
}
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (iwlwifi_mod_params.nvm_file)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
WARN_ON(ret);
@@ -310,7 +313,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
MVM_UCODE_CALIB_TIMEOUT);
if (!ret)
- mvm->init_ucode_run = true;
+ mvm->init_ucode_complete = true;
goto out;
error:
@@ -353,8 +356,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
- /* If we were in RFKILL during module loading, load init ucode now */
- if (!mvm->init_ucode_run) {
+ /*
+ * If we haven't completed the run of the init ucode during
+ * module loading, load init ucode now
+ * (for example, if we were in RFKILL)
+ */
+ if (!mvm->init_ucode_complete) {
ret = iwl_run_init_mvm_ucode(mvm, false);
if (ret && !iwlmvm_mod_params.init_dbg) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +431,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+ ret = iwl_mvm_power_update_device_mode(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 5fe23a5ea9b6..f41f9b079831 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -80,7 +80,7 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
- unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+ unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
enum iwl_tsf_id preferred_tsf;
bool found_vif;
};
@@ -218,7 +218,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.preferred_tsf = NUM_TSF_IDS,
.used_hw_queues = {
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(IWL_MVM_AUX_QUEUE) |
+ BIT(mvm->aux_queue) |
BIT(IWL_MVM_CMD_QUEUE)
},
.found_vif = false,
@@ -242,9 +242,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
* that we should share it with another interface.
*/
- /* Currently, MAC ID 0 should be used only for the managed vif */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ break;
+ /* fall through */
+ default:
__clear_bit(0, data.available_mac_ids);
+ }
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO;
goto exit_fail;
@@ -317,9 +325,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
u8 queue = find_first_zero_bit(data.used_hw_queues,
- IWL_MVM_FIRST_AGG_QUEUE);
+ mvm->first_agg_queue);
- if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate cab queue\n");
ret = -EIO;
goto exit_fail;
@@ -559,8 +567,12 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
/* Don't use cts to self as the fw doesn't support it currently. */
- if (vif->bss_conf.use_cts_prot)
+ if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ cmd->protection_flags |=
+ cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ }
/*
* I think that we should enable these 2 flags regardless the HT PROT
@@ -707,8 +719,35 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
MAC_FILTER_IN_CONTROL_AND_MGMT |
MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_CRC32);
+ mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST);
+ /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ cmd.ibss.bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -721,7 +760,8 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
struct iwl_mvm_go_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->ap_ibss_active)
data->go_active = true;
}
@@ -833,9 +873,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
/* Set up TX beacon command fields */
- iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
- beacon->data,
- beacon_skb_len);
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ beacon->data,
+ beacon_skb_len);
/* Submit command */
cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +889,15 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd(mvm, &cmd);
}
-/* The beacon template for the AP/GO context has changed and needs update */
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct sk_buff *beacon;
int ret;
- WARN_ON(vif->type != NL80211_IFTYPE_AP);
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
beacon = ieee80211_beacon_get(mvm->hw, vif);
if (!beacon)
@@ -1018,6 +1060,8 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
case NL80211_IFTYPE_P2P_DEVICE:
return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
default:
break;
}
@@ -1038,6 +1082,9 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (ret)
return ret;
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
mvmvif->uploaded = true;
return 0;
}
@@ -1077,6 +1124,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
mvmvif->uploaded = false;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9833cdf6177c..74bc2c8af06d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -77,6 +77,7 @@
#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"
#include "iwl-phy-db.h"
+#include "testmode.h"
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
@@ -138,6 +139,14 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
}
}
+static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+ /* we create the 802.11 header and SSID element */
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
+ return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+}
+
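/*
 * Worked numbers for the helper above (illustrative firmware value, not
 * taken from this patch): with max_probe_length = 512, the scan IE space
 * is 512 - 24 - 2 = 486 bytes when the firmware builds the SSID element
 * itself (IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID), and 512 - 24 - 34 = 454
 * bytes otherwise (24-byte 802.11 header plus a maximum-length SSID
 * element of 2 + 32 bytes).
 */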
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -155,10 +164,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
- hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+ hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->rate_control_algorithm = "iwl-mvm-rs";
@@ -171,6 +179,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ }
+
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
@@ -181,6 +195,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE);
+ /* IBSS has bugs in older versions */
+ if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
@@ -191,8 +209,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -212,9 +228,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
- /* we create the 802.11 header and a max-length SSID element */
- hw->wiphy->max_scan_ie_len =
- mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +246,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+ }
+
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -548,7 +572,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
* In short: there's not much we can do at this point, other than
* allocating resources :)
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
qmask);
@@ -698,7 +723,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
* For AP/GO interface, the tear down of the resources allocated to the
 * interface is handled as part of the stop_ap flow.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (vif == mvm->noa_vif) {
+ mvm->noa_vif = NULL;
+ mvm->noa_duration = 0;
+ }
+#endif
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
goto out_release;
}
@@ -796,6 +828,27 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
return;
}
iwl_mvm_configure_mcast_filter(mvm, vif);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * If we're restarting then the firmware will
+ * obviously have lost synchronisation with
+ * the AP. It will attempt to synchronise by
+ * itself, but we can make it more reliable by
+ * scheduling a session protection time event.
+ *
+ * The firmware needs to receive a beacon to
+ * catch up with synchronisation, use 110% of
+ * the beacon interval.
+ *
+ * Set a large maximum delay to allow for more
+ * than a single interface.
+ */
+ u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+ iwl_mvm_protect_session(mvm, vif, dur, dur,
+ 5 * dur);
+ }
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -811,7 +864,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
/* Workaround for FW bug, otherwise FW disables device
* power save upon disassociation
*/
@@ -819,7 +873,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
- iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -848,7 +902,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
-static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +926,7 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_remove;
- mvmvif->ap_active = true;
+ mvmvif->ap_ibss_active = true;
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
@@ -883,10 +938,12 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (ret)
goto out_rm_bcast;
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_bt_coex_vif_change(mvm);
+
mutex_unlock(&mvm->mutex);
return 0;
@@ -901,7 +958,8 @@ out_unlock:
return ret;
}
-static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +968,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&mvm->mutex);
- mvmvif->ap_active = false;
+ mvmvif->ap_ibss_active = false;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
- /* Need to update the P2P Device MAC */
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -924,10 +984,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_unlock(&mvm->mutex);
}
-static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
{
/* Need to send a new beacon template to the FW */
if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1011,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
break;
case NL80211_IFTYPE_AP:
- iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
default:
/* shouldn't happen */
@@ -1163,7 +1225,54 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
/* Try really hard to protect the session and hear a beacon */
- iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
+ IWL_DEBUG_SCAN(mvm,
+ "SCHED SCAN request during internal scan - abort\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+
+ ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ goto err;
+
+ ret = iwl_mvm_sched_scan_start(mvm, req);
+ if (!ret)
+ goto out;
+err:
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -1207,8 +1316,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
- if (vif->type == NL80211_IFTYPE_AP && !sta) {
- /* GTK on AP interface is a TX-only key, return 0 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta) {
+ /*
+ * GTK on AP interface is a TX-only key, return 0;
+ * on IBSS they're per-station and because we're lazy
+ * we don't support them for RX, so do the same.
+ */
ret = 0;
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
@@ -1252,6 +1366,9 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
}
@@ -1445,6 +1562,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
+ iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -1464,14 +1582,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
- * (in bss_info_changed).
+ * (in bss_info_changed), similarly for IBSS.
*/
ret = 0;
goto out_unlock;
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MONITOR:
break;
default:
@@ -1517,10 +1635,10 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
- if (vif->type == NL80211_IFTYPE_AP)
- goto out_unlock;
-
switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ goto out_unlock;
case NL80211_IFTYPE_MONITOR:
mvmvif->monitor_active = false;
iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1668,72 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}
-static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+ [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event)
+ void *data, int len)
+{
+ struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+ int err;
+ u32 noa_duration;
+
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[IWL_MVM_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+ case IWL_MVM_TM_CMD_SET_NOA:
+ if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+ !vif->bss_conf.enable_beacon ||
+ !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+ return -EINVAL;
+
+ noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+ if (noa_duration >= vif->bss_conf.beacon_int)
+ return -EINVAL;
+
+ mvm->noa_duration = noa_duration;
+ mvm->noa_vif = vif;
+
+ return iwl_mvm_update_quotas(mvm, NULL);
+ case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+ /* must be associated client vif - ignore authorized */
+ if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+ return -EINVAL;
+
+ if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int err;
- iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
+ mutex_lock(&mvm->mutex);
+ err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+ mutex_unlock(&mvm->mutex);
+
+ return err;
}
+#endif
struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
@@ -1578,23 +1754,27 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
.set_key = iwl_mvm_mac_set_key,
.update_tkip_key = iwl_mvm_mac_update_tkip_key,
.remain_on_channel = iwl_mvm_roc,
.cancel_remain_on_channel = iwl_mvm_cancel_roc,
- .rssi_callback = iwl_mvm_mac_rssi_callback,
-
.add_chanctx = iwl_mvm_add_chanctx,
.remove_chanctx = iwl_mvm_remove_chanctx,
.change_chanctx = iwl_mvm_change_chanctx,
.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
- .start_ap = iwl_mvm_start_ap,
- .stop_ap = iwl_mvm_stop_ap,
+ .start_ap = iwl_mvm_start_ap_ibss,
+ .stop_ap = iwl_mvm_stop_ap_ibss,
+ .join_ibss = iwl_mvm_start_ap_ibss,
+ .leave_ibss = iwl_mvm_stop_ap_ibss,
.set_tim = iwl_mvm_set_tim,
+ CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b0389279cc1e..fed21ef4162d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -73,7 +73,6 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
-#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -162,6 +161,7 @@ enum iwl_power_scheme {
struct iwl_mvm_power_ops {
int (*power_update_mode)(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+ int (*power_update_device_mode)(struct iwl_mvm *mvm);
int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +241,18 @@ enum iwl_mvm_smps_type_request {
* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
+* @bt_coex_min_thold: minimum threshold for BT coex
+* @bt_coex_max_thold: maximum threshold for BT coex
+* @last_bt_coex_event: rssi of the last BT coex event
*/
struct iwl_mvm_vif_bf_data {
bool bf_enabled;
bool ba_enabled;
s8 ave_beacon_signal;
s8 last_cqm_event;
+ s8 bt_coex_min_thold;
+ s8 bt_coex_max_thold;
+ s8 last_bt_coex_event;
};
/**
@@ -255,8 +261,8 @@ struct iwl_mvm_vif_bf_data {
* @color: to solve races upon MAC addition and removal
* @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
* @uploaded: indicates the MAC context has been added to the device
- * @ap_active: indicates that ap context is configured, and that the interface
- * should get quota etc.
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ * should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @queue_params: QoS params for this MAC
@@ -272,7 +278,7 @@ struct iwl_mvm_vif {
u8 ap_sta_id;
bool uploaded;
- bool ap_active;
+ bool ap_ibss_active;
bool monitor_active;
struct iwl_mvm_vif_bf_data bf_data;
@@ -306,6 +312,9 @@ struct iwl_mvm_vif {
int tx_key_idx;
+ bool seqno_valid;
+ u16 seqno;
+
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +342,7 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
+ IWL_MVM_SCAN_SCHED,
};
/**
@@ -434,7 +444,7 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
- bool init_ucode_run;
+ bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
@@ -470,6 +480,9 @@ struct iwl_mvm {
enum iwl_scan_status scan_status;
struct iwl_scan_cmd *scan_cmd;
+ /* rx chain antennas set through debugfs for the scan command */
+ u8 scan_rx_ant;
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -479,7 +492,8 @@ struct iwl_mvm {
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool prevent_power_down_d3;
+ bool disable_power_off;
+ bool disable_power_off_d3;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +537,23 @@ struct iwl_mvm {
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
+ struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
const struct iwl_mvm_power_ops *pm_ops;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ u32 noa_duration;
+ struct ieee80211_vif *noa_vif;
+#endif
+
+ /* Tx queues */
+ u8 aux_queue;
+ u8 first_agg_queue;
+ u8 last_agg_queue;
};
/* Extract MVM priv from op_mode and _hw */
@@ -570,6 +595,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum ieee80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +636,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +711,23 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+/* Scheduled scan */
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req);
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +766,13 @@ static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
return mvm->pm_ops->power_disable(mvm, vif);
}
+static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+{
+ if (mvm->pm_ops->power_update_device_mode)
+ return mvm->pm_ops->power_update_device_mode(mvm);
+ return 0;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -745,6 +798,15 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +816,20 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_SCO_HID_A2DP,
+ BT_KILL_MSK_REDUCED_TXPOW,
+ BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index edb94ea31654..2beffd028b67 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -77,7 +77,7 @@ static const int nvm_to_read[] = {
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 6000
+#define IWL_MAX_NVM_SECTION_SIZE 7000
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
#define MAX_NVM_FILE_LEN 16384
/*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
* HOW TO CREATE THE NVM FILE FORMAT:
* ------------------------------
* 1. create hex file, format:
@@ -277,20 +279,23 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
*
* 4. save as "iNVM_xxx.bin" under /lib/firmware
*/
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
- int ret, section_id, section_size;
+ int ret, section_size;
+ u16 section_id;
const struct firmware *fw_entry;
const struct {
__le16 word1;
__le16 word2;
u8 data[];
} *file_sec;
- const u8 *eof;
+ const u8 *eof, *temp;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
/*
* Obtain NVM image via request_firmware. Since we already used
* request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
break;
}
- ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
- section_size);
- if (ret < 0) {
- IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+ IWL_ERR(mvm, "Invalid NVM section ID\n");
+ ret = -EINVAL;
+ break;
+ }
+ temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
break;
}
+ mvm->nvm_sections[section_id].data = temp;
+ mvm->nvm_sections[section_id].length = section_size;
/* advance to the next section */
file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@ out:
return ret;
}
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+ int i, ret;
+ u16 section_id;
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
+
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section_id = nvm_to_read[i];
+ ret = iwl_nvm_write_section(mvm, section_id,
+ sections[section_id].data,
+ sections[section_id].length);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
int iwl_nvm_init(struct iwl_mvm *mvm)
{
int ret, i, section;
@@ -385,36 +418,36 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
- ret = iwl_mvm_load_external_nvm(mvm);
+ ret = iwl_mvm_read_external_nvm(mvm);
if (ret)
return ret;
- }
-
- /* Read From FW NVM */
- IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
- /* TODO: find correct NVM max size for a section */
- nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
- GFP_KERNEL);
- if (!nvm_buffer)
- return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
- section = nvm_to_read[i];
- /* we override the constness for initial read */
- ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
- if (ret < 0)
- break;
- temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
- if (!temp) {
- ret = -ENOMEM;
- break;
+ } else {
+ /* Read From FW NVM */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+ /* TODO: find correct NVM max size for a section */
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ section = nvm_to_read[i];
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+ if (ret < 0)
+ break;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
}
- mvm->nvm_sections[section].data = temp;
- mvm->nvm_sections[section].length = ret;
+ kfree(nvm_buffer);
+ if (ret < 0)
+ return ret;
}
- kfree(nvm_buffer);
- if (ret < 0)
- return ret;
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
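For the external-NVM path reworked in this file: each section in the iNVM image begins with two little-endian 16-bit words, decoded by the NVM_WORD1_LEN and NVM_WORD2_ID macros shown in the hunk above. A minimal, self-contained sketch of that walk, assuming a little-endian host and an in-memory copy of the file (the buffer and function names here are illustrative, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NVM_WORD1_LEN(x) (8 * ((x) & 0x03FF))  /* payload length in bytes */
    #define NVM_WORD2_ID(x)  ((x) >> 12)           /* section id in the top 4 bits */

    /* Walk an in-memory copy of the file and print each section header. */
    static void dump_nvm_sections(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + 4 <= len) {
                    uint16_t word1, word2;
                    size_t sec_len;
                    unsigned int sec_id;

                    memcpy(&word1, buf + off, 2);      /* little-endian host assumed */
                    memcpy(&word2, buf + off + 2, 2);

                    sec_len = NVM_WORD1_LEN(word1);
                    sec_id = NVM_WORD2_ID(word2);

                    if (off + 4 + sec_len > len)
                            break;                     /* truncated section, stop */

                    printf("section %u: %zu bytes\n", sec_id, sec_len);
                    off += 4 + sec_len;                /* advance to the next header */
            }
    }

The driver performs the same walk on the requested firmware blob and, after the rewrite above, caches each section in mvm->nvm_sections instead of writing it to the NIC immediately; iwl_mvm_load_nvm_to_nic() replays the cached sections later.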
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2fcc8ef88a68..d86083c6f445 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -224,6 +224,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+ iwl_mvm_rx_scan_offload_complete_notif, false),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+ false),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TIME_EVENT_NOTIFICATION),
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
+ CMD(NON_QOS_TX_COUNTER_CMD),
CMD(RADIO_VERSION_NOTIFICATION),
CMD(SCAN_REQUEST_CMD),
CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+ CMD(ADD_STA_KEY),
CMD(ADD_STA),
CMD(REMOVE_STA),
CMD(LQ_CMD),
CMD(SCAN_OFFLOAD_CONFIG_CMD),
+ CMD(MATCH_FOUND_NOTIFICATION),
CMD(SCAN_OFFLOAD_REQUEST_CMD),
CMD(SCAN_OFFLOAD_ABORT_CMD),
CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
+ CMD(BT_COEX_CI),
};
#undef CMD
@@ -344,6 +352,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+ mvm->aux_queue = 15;
+ mvm->first_agg_queue = 16;
+ mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ if (mvm->cfg->base_params->num_of_queues == 16) {
+ mvm->aux_queue = 11;
+ mvm->first_agg_queue = 12;
+ }
+
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
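The queue assignment added in this hunk reserves one auxiliary queue and a contiguous aggregation range whose bounds depend on how many hardware queues the device exposes. A small standalone sketch of the same layout rule (the struct and function names are illustrative):

    struct queue_layout {
            unsigned int aux_queue;
            unsigned int first_agg_queue;
            unsigned int last_agg_queue;
    };

    /* Devices with more than 16 queues keep aux at 15 and aggregate on
     * 16..N-1; 16-queue devices shift aux down to 11 and aggregate on 12..15.
     */
    static struct queue_layout pick_queue_layout(unsigned int num_of_queues)
    {
            struct queue_layout q = {
                    .aux_queue = 15,
                    .first_agg_queue = 16,
                    .last_agg_queue = num_of_queues - 1,
            };

            if (num_of_queues == 16) {
                    q.aux_queue = 11;
                    q.first_agg_queue = 12;
            }
            return q;
    }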
@@ -401,24 +417,32 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
iwl_mvm_tt_initialize(mvm);
- mutex_lock(&mvm->mutex);
- err = iwl_run_init_mvm_ucode(mvm, true);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ /*
+ * If the NVM exists in an external file,
+ * there is no need to power up the NIC at driver load
+ */
+ if (iwlwifi_mod_params.nvm_file) {
+ iwl_nvm_init(mvm);
+ } else {
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
- /* Stop the hw after the ALIVE and NVM has been read */
- if (!iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_hw(mvm->trans, false);
+ /* Stop the hw after the ALIVE and NVM has been read */
+ if (!iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_hw(mvm->trans, false);
+ }
scan_size = sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
@@ -435,7 +459,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
mvm->pm_ops = &pm_mac_ops;
else
mvm->pm_ops = &pm_legacy_ops;
@@ -449,7 +473,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- iwl_trans_stop_hw(trans, true);
+ if (!iwlwifi_mod_params.nvm_file)
+ iwl_trans_stop_hw(trans, true);
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -715,6 +740,9 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
case IWL_MVM_SCAN_OS:
ieee80211_scan_completed(mvm->hw, true);
break;
+ case IWL_MVM_SCAN_SCHED:
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
}
if (mvm->restart_fw > 0)
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d58e393324ef..550824aa84ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -300,11 +300,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
BIT(IEEE80211_AC_VI) |
BIT(IEEE80211_AC_BE) |
@@ -319,10 +314,31 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
- cmd->heavy_tx_thld_packets =
- IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
- cmd->heavy_rx_thld_packets =
- IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
cmd->heavy_tx_thld_percentage =
IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
cmd->heavy_rx_thld_percentage =
@@ -430,6 +446,32 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ struct iwl_device_power_cmd cmd = {
+ .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+ };
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ return 0;
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
+ mvm->disable_power_off)
+ cmd.flags &=
+ cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ IWL_DEBUG_POWER(mvm,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+ &cmd);
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, char *buf,
@@ -440,10 +482,11 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -609,6 +652,7 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
+ .power_update_device_mode = iwl_mvm_power_update_device,
.power_disable = iwl_mvm_power_mac_disable,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 5c6ae16ec52b..17e2bc827f9a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -110,7 +110,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_AP:
- if (mvmvif->ap_active)
+ case NL80211_IFTYPE_ADHOC:
+ if (mvmvif->ap_ibss_active)
data->n_interfaces[id]++;
break;
case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
break;
case NL80211_IFTYPE_P2P_DEVICE:
break;
- case NL80211_IFTYPE_ADHOC:
- if (vif->bss_conf.ibss_joined)
- data->n_interfaces[id]++;
- break;
default:
WARN_ON_ONCE(1);
break;
}
}
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct iwl_mvm_vif *mvmvif;
+ int i, phy_id = -1, beacon_int = 0;
+
+ if (!mvm->noa_duration || !mvm->noa_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ phy_id = mvmvif->phy_ctxt->id;
+ beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+ u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+ if (id != phy_id)
+ continue;
+
+ quota *= (beacon_int - mvm->noa_duration);
+ quota /= beacon_int;
+
+ cmd->quotas[i].quota = cpu_to_le32(quota);
+ }
+#endif
+}
+
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
/* Give the remainder of the session to the first binding */
le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
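The NoA adjustment above shrinks a binding's quota in proportion to the absence time inside one beacon interval, multiplying before dividing to keep integer precision. A tiny sketch of the same scaling with made-up numbers (the values are illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t scale_quota_for_noa(uint32_t quota, uint32_t beacon_int,
                                        uint32_t noa_duration)
    {
            if (!beacon_int || noa_duration >= beacon_int)
                    return quota;   /* nothing sensible to scale */

            quota *= beacon_int - noa_duration;
            quota /= beacon_int;
            return quota;
    }

    int main(void)
    {
            /* 25 TU of NoA in a 100 TU beacon interval cuts the quota by 25% */
            assert(scale_quota_for_noa(400, 100, 25) == 300);
            return 0;
    }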
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 4ffaa3fa153f..a0b4cc8d9c3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -82,13 +82,24 @@ static const u8 ant_toggle_lookup[] = {
[ANT_ABC] = ANT_ABC,
};
-#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##rp##M_INDEX, \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+ IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX }
+#define IWL_DECLARE_MCS_RATE(s) \
+ [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_INVM_INDEX, \
+ IWL_RATE_INVM_INDEX }
+
/*
* Parameter order:
* rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
- /* FIXME:RS: ^^ should be INV (legacy) */
+ IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
+ IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
+ IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
+ IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
+ IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
+ IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
+ IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
+ IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
+ IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
+ IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
};
static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
- /* HT rate format */
if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = rs_extract_rate(rate_n_flags);
-
- WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+ idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- idx += IWL_FIRST_OFDM_RATE;
- /* skip 9M not supported in ht*/
+ /* skip 9M, which is not supported in HT */
if (idx >= IWL_RATE_9M_INDEX)
idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
- /* legacy rate format, search for match in table */
+ /* skip 9M, which is not supported in VHT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx++;
+ if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+ return idx;
} else {
+ /* legacy rate format, search for match in table */
+
+ u8 legacy_rate = rs_extract_rate(rate_n_flags);
for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
- if (iwl_rates[idx].plcp ==
- rs_extract_rate(rate_n_flags))
+ if (iwl_rates[idx].plcp == legacy_rate)
return idx;
}
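In the new iwl_hwrate_to_plcp_idx() above, an HT or VHT MCS number becomes a rate-table index by adding the MCS-0 slot and then stepping over the 9 Mbps entry, which exists only for legacy rates. A sketch of that mapping; the numeric index values below are assumptions that simply follow the order of the iwl_rates[] table in this hunk (1, 2, 5.5, 11, 6, 9, 12, ... Mbps, then MCS 7-9):

    /* Illustrative indices following the table order above:
     * 1M=0, 2M=1, 5.5M=2, 11M=3, 6M=4 (MCS 0), 9M=5, 12M=6 (MCS 1), ...
     */
    enum {
            RATE_MCS_0_IDX = 4,     /* shares a slot with the 6 Mbps legacy rate */
            RATE_9M_IDX    = 5,     /* legacy-only, no MCS lives here */
    };

    /* Map an HT/VHT MCS number (0..9) to a rate-table index, skipping the
     * 9 Mbps hole the same way the hunk above does.
     */
    static int mcs_to_table_index(unsigned int mcs)
    {
            int idx = mcs + RATE_MCS_0_IDX;

            if (idx >= RATE_9M_IDX)
                    idx += 1;       /* step over the legacy-only 9 Mbps slot */
            return idx;
    }

With these assumed values, MCS 0 lands on the 6 Mbps slot, MCS 1 on 12 Mbps, and MCS 7-9 on the three MCS-only entries at the end of the table.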
@@ -155,6 +171,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
@@ -180,35 +197,52 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
- {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
+ {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
+ {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
- {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
+ {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
+ {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
+ {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
+ {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+ {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
- {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
- {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
+ {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+ {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
- {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
+ {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+ {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
+ {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
+ {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+ {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
/* mbps, mcs */
@@ -263,7 +297,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
if (lq_sta->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
}
@@ -275,17 +309,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
{
int ret = -EAGAIN;
- /*
- * Don't create TX aggregation sessions when in high
- * BT traffic, as they would just be disrupted by BT.
- */
- if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
- IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
- BT_MBOX_MSG(&mvm->last_bt_notif,
- 3, TRAFFIC_LOAD));
- return ret;
- }
-
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
*/
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
+ struct iwl_scale_tbl_info *tbl, int index)
{
u32 rate_n_flags = 0;
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwl_rates[index].plcp;
+ rate_n_flags |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
rate_n_flags |= RATE_MCS_CCK_MSK;
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
+ return rate_n_flags;
+ }
+
+ if (is_ht(tbl->lq_type)) {
+ if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
+ index = IWL_LAST_HT_RATE;
}
- rate_n_flags = RATE_MCS_HT_MSK;
+ rate_n_flags |= RATE_MCS_HT_MSK;
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_siso;
- else if (is_mimo2(tbl->lq_type))
- rate_n_flags |= iwl_rates[index].plcp_mimo2;
+ if (is_ht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
else
WARN_ON_ONCE(1);
+ } else if (is_vht(tbl->lq_type)) {
+ if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+ IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+ index = IWL_LAST_VHT_RATE;
+ }
+ rate_n_flags |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(tbl->lq_type))
+ rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+ else
+ WARN_ON_ONCE(1);
+
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40)
- rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_HT_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(mvm, "GF was set with SGI:SISO\n");
- }
- }
- }
+ rate_n_flags |= tbl->bw;
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
return rate_n_flags;
}
@@ -473,7 +501,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
+ u8 nss;
memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
return -EINVAL;
}
tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl->lq_type = LQ_NONE;
tbl->max_search = IWL_MAX_SEARCH;
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ /* Legacy */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
+ !(rate_n_flags & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
}
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- tbl->is_ht40 = 1;
-
- mcs = rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
- if (num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
+
+ return 0;
+ }
+
+ /* HT or VHT */
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_HT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_HT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ tbl->lq_type = LQ_VHT_SISO;
+ WARN_ON_ONCE(num_of_ant != 1);
+ } else if (nss == 2) {
+ tbl->lq_type = LQ_VHT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
} else {
- WARN_ON_ONCE(num_of_ant == 3);
+ WARN_ON_ONCE(1);
}
}
+
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
+ WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_vht(tbl->lq_type));
+
return 0;
}
@@ -550,22 +599,6 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
}
/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool rs_use_green(struct ieee80211_sta *sta)
-{
- /*
- * There's a bug somewhere in this code that causes the
- * scaling to get stuck because GF+SGI can't be combined
- * in SISO rates. Until we find that bug, disable GF, it
- * has only limited benefit and we still interoperate with
- * GF APs since we can always receive GF transmissions.
- */
- return false;
-}
-
-/**
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -576,16 +609,15 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (is_legacy(rate_type)) {
+ if (is_legacy(rate_type))
return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else {
- WARN_ON_ONCE(!is_mimo2(rate_type));
- return lq_sta->active_mimo2_rate;
- }
- }
+ else if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate_type))
+ return lq_sta->active_mimo2_rate;
+
+ WARN_ON_ONCE(1);
+ return 0;
}
static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
u16 rate_mask;
u16 high_low;
u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
struct iwl_mvm *mvm = lq_sta->drv;
/* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
+ tbl->lq_type = LQ_LEGACY_A;
else
- tbl->lq_type = LQ_G;
+ tbl->lq_type = LQ_LEGACY_G;
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- tbl->is_ht40 = 0;
+ tbl->bw = 0;
tbl->is_SGI = 0;
tbl->max_search = IWL_MAX_SEARCH;
}
@@ -701,7 +732,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
low = scale_index;
out:
- return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+ return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
}
/*
@@ -714,6 +745,18 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
(a->is_SGI == b->is_SGI);
}
+static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+{
+ if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_40;
+ else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_80;
+ else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ return RATE_MCS_CHAN_WIDTH_160;
+
+ return RATE_MCS_CHAN_WIDTH_20;
+}
+
/*
* mac80211 sends us Tx status
*/
@@ -783,16 +826,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
*/
if (info->band == IEEE80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
+ } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
+ mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
}
+
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
(tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
(tbl_type.ant_type != info->status.antenna) ||
(!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
(!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
(rs_index != mac_index)) {
IWL_DEBUG_RATE(mvm,
"initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
+ !(is_vht(tbl->lq_type)))) {
tbl->expected_tpt = expected_tpt_legacy;
return;
}
@@ -958,18 +1009,40 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
return;
}
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
/* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
* status */
- if (is_siso(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else {
- WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ if (is_siso(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_siso_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_siso_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_siso_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else if (is_mimo2(tbl->lq_type)) {
+ switch (tbl->bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(1);
}
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
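The expected-throughput tables rebuilt in this hunk keep the four-row layout described in the comment near their definitions (row 0 = NGI, 1 = SGI, 2 = AGG+NGI, 3 = AGG+SGI), and the if/else chain that the last line of this hunk begins picks one of those rows. The same selection can be written as a single expression; a short sketch (the helper name is illustrative):

    /* Row layout: 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI */
    static int expected_tpt_row(int is_sgi, int is_agg)
    {
            return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);
    }

    /* e.g. tbl->expected_tpt = ht_tbl_pointer[expected_tpt_row(tbl->is_SGI,
     *                                                          lq_sta->is_agg)];
     */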
@@ -1084,9 +1157,47 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
return new_rate;
}
-static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+/* Move to the next action and wrap around to the first action in case
+ * we're at the last action. Assumes actions start at 0.
+ */
+static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
+ u8 last_action)
+{
+ BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
+ BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
+
+ tbl->action = (tbl->action + 1) % (last_action + 1);
+}
+
+static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+ else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ tbl->bw = RATE_MCS_CHAN_WIDTH_40;
+ else
+ tbl->bw = RATE_MCS_CHAN_WIDTH_20;
+}
+
+static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
+ struct ieee80211_sta *sta)
{
- return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(tbl) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(tbl) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+
+ return false;
}
/*
@@ -1099,7 +1210,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
{
u16 rate_mask;
s32 rate;
- s8 is_green = lq_sta->is_green;
if (!sta->ht_cap.ht_supported)
return -1;
@@ -1113,16 +1223,12 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
- tbl->lq_type = LQ_MIMO2;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1150,7 +1256,6 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
- u8 is_green = lq_sta->is_green;
s32 rate;
if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
- tbl->lq_type = LQ_SISO;
+ tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
+ rs_set_bw_from_sta(tbl, sta);
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1181,9 +1279,9 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
rate, rate_mask);
return -1;
}
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
+ IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+ tbl->current_rate);
return 0;
}
@@ -1211,14 +1309,10 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
+ case IWL_LEGACY_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
/* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1285,9 +1377,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
@@ -1300,12 +1390,10 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
@@ -1314,40 +1402,17 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_MIMO2;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- valid_tx_ant =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
- if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ tbl->action = IWL_SISO_SWITCH_ANTENNA;
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
+ case IWL_SISO_SWITCH_ANTENNA:
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
+ if (tx_chains_num <= 1)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
break;
case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(mvm,
- "SGI was set in GF+SISO\n");
- }
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1424,9 +1475,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1440,63 +1489,20 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
- s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
u8 update_search_tbl_counter = 0;
int ret;
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- tbl->action = IWL_MIMO2_SWITCH_SISO_A;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
-
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
+ if (!rs_sgi_allowed(tbl, sta))
break;
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
}
search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
+ rate_n_flags_from_tbl(mvm, search_tbl, index);
update_search_tbl_counter = 1;
goto out;
default:
WARN_ON_ONCE(1);
}
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (tbl->action == start_action)
break;
@@ -1564,9 +1563,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
return 0;
out:
lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1660,15 +1657,16 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* setup rate table in uCode
*/
static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
+ int index)
{
u32 rate;
/* Update uCode's rate table. */
- rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
- rs_fill_link_cmd(mvm, lq_sta, rate);
+ rate = rate_n_flags_from_tbl(mvm, tbl, index);
+ rs_fill_link_cmd(mvm, sta, lq_sta, rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
@@ -1712,7 +1710,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
u16 rate_scale_index_msk = 0;
- u8 is_green = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
@@ -1754,11 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = rs_use_green(sta);
- is_green = lq_sta->is_green;
/* current tx rate */
index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
}
return;
}
@@ -1978,24 +1970,24 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
(current_tpt > (100 * tbl->expected_tpt[low]))))
scale_action = 0;
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
/*
* don't set scale_action, don't want to scale up if
* the rate scale doesn't otherwise think that is a
* good idea.
*/
} else if (lq_sta->last_bt_traffic <=
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
scale_action = -1;
}
}
lq_sta->last_bt_traffic =
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+ if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
- rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
rs_stay_in_table(lq_sta, false);
@@ -2071,7 +2063,7 @@ lq_update:
IWL_DEBUG_RATE(mvm,
"Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
- rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+ rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
} else {
done_search = 1;
@@ -2113,7 +2105,7 @@ lq_update:
}
out:
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+ tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
lq_sta->last_txrate_idx = index;
}
@@ -2140,7 +2132,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
int rate_idx;
int i;
u32 rate;
- u8 use_green = rs_use_green(sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
@@ -2172,10 +2163,10 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
rs_toggle_antenna(valid_tx_ant, &rate, tbl);
- rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+ rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
- rs_fill_link_cmd(NULL, lq_sta, rate);
+ rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
}
@@ -2190,7 +2181,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
- int rate_idx;
IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
@@ -2215,36 +2205,9 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
if (rate_control_send_low(sta, mvm_sta, txrc))
return;
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
- /* 6M and 9M shared same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO3_6M_PLCP);
- if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
- info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
- info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
+ iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+ info->band, &info->control.rates[0]);
+
info->control.rates[0].count = 1;
}
@@ -2261,6 +2224,24 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
return &sta_priv->lq_sta;
}
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+ return IWL_RATE_MCS_7_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+ return IWL_RATE_MCS_8_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+ return IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ return -1;
+}
+
/*
* Called after adding a new station to initialize rate scaling
*/
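rs_vht_highest_rx_mcs_index() above reads two bits per spatial stream out of the VHT rx_mcs_map and converts them to the highest usable MCS. A standalone sketch of the same decode; the enum values mirror the standard mac80211/802.11ac encoding (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported), stated here from memory rather than copied from a header:

    #include <stdint.h>

    enum {
            VHT_MCS_SUPPORT_0_7   = 0,
            VHT_MCS_SUPPORT_0_8   = 1,
            VHT_MCS_SUPPORT_0_9   = 2,
            VHT_MCS_NOT_SUPPORTED = 3,
    };

    /* Extract the 2-bit support field for stream number nss (1-based), as the
     * shift/mask in the function above does, and return the highest MCS or -1.
     */
    static int highest_rx_mcs(uint16_t rx_mcs_map, int nss)
    {
            unsigned int val = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

            switch (val) {
            case VHT_MCS_SUPPORT_0_7:
                    return 7;
            case VHT_MCS_SUPPORT_0_8:
                    return 8;
            case VHT_MCS_SUPPORT_0_9:
                    return 9;
            default:
                    return -1;      /* stream not supported */
            }
    }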
@@ -2270,6 +2251,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int i, j;
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct iwl_mvm_sta *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = rs_use_green(sta);
lq_sta->band = sband->band;
/*
* active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
for_each_set_bit(i, &supp, BITS_PER_LONG)
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+ /* TODO: should probably account for rx_highest for both HT/VHT */
+ if (!vht_cap || !vht_cap->vht_supported) {
+ /* active_siso_rate mask includes 9 MBits (bit 5),
+ * and CCK (bits 0-3), supp_rates[] does not;
+ * shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->is_vht = false;
+ } else {
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+
+		/* TODO: avoid MCS9 in 20MHz, which isn't valid for 11ac */
+ lq_sta->is_vht = true;
+ }
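For reference, the rx_mask conversion in the HT branch above in isolation (a
sketch assuming IWL_FIRST_OFDM_RATE is the 6 Mbps index and that the 9 Mbps
slot sits one position above it):

/* Standalone illustration, not part of the patch: spread the 8-bit HT MCS
 * mask over the driver's rate indexes, leaving the 9 Mbps slot clear since
 * HT has no 9 Mbps rate.
 */
static unsigned int ht_rx_mask_to_rate_mask(unsigned char rx_mask,
					    unsigned int first_ofdm_rate)
{
	unsigned int mask = rx_mask << 1;	/* open a hole above MCS 0 */

	mask |= rx_mask & 0x1;			/* keep MCS 0 in place */
	mask &= ~0x2u;				/* force the 9 Mbps slot off */
	return mask << first_ofdm_rate;		/* align MCS 0 with 6 Mbps */
}
/* e.g. rx_mask = 0xff with first_ofdm_rate = 4 yields 0x1fd0 */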
IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X\n",
+ "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
lq_sta->active_siso_rate,
- lq_sta->active_mimo2_rate);
+ lq_sta->active_mimo2_rate,
+ lq_sta->is_vht);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@@ -2358,6 +2368,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta, u32 new_rate)
{
struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
&rate_idx);
-
/* Indicate to uCode which entries might be MIMO.
* If initial rate was MIMO, this will finally end up
* as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Don't allow HT rates after next pass.
- * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ * rs_get_lower_rate() will change type to LQ_LEGACY_A
+ * or LQ_LEGACY_G.
+ */
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
lq_cmd->agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
- /*
- * overwrite if needed, pass aggregation time limit
- * to uCode in uSec - This is racy - but heh, at least it helps...
- */
- if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
- lq_cmd->agg_time_limit = cpu_to_le16(1200);
+ if (sta)
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
(iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
+ (is_legacy(tbl->lq_type)) ? "legacy" :
+ is_vht(tbl->lq_type) ? "VHT" : "HT");
+ if (is_ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
+ (is_ht20(tbl)) ? "20MHz" :
+ (is_ht40(tbl)) ? "40MHz" :
+					(is_ht80(tbl)) ? "80MHz" : "BAD BW");
+ desc += sprintf(buff+desc, " %s %s\n",
(tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
+ (lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
int desc = 0;
int i, j;
ssize_t ret;
-
+ struct iwl_scale_tbl_info *tbl;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
return -ENOMEM;
for (i = 0; i < LQ_SIZE; i++) {
+ tbl = &(lq_sta->lq_info[i]);
desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+ "%s type=%d SGI=%d BW=%s DUP=0\n"
"rate=0x%X\n",
lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
+ tbl->lq_type,
+ tbl->is_SGI,
+				is_ht20(tbl) ? "20MHz" :
+				is_ht40(tbl) ? "40MHz" :
+				is_ht80(tbl) ? "80MHz" : "ERR",
+ tbl->current_rate);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
+ tbl->win[j].counter,
+ tbl->win[j].success_counter,
+ tbl->win[j].success_ratio);
}
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 335cf1682902..5d5344f7070b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -35,9 +35,11 @@
#include "iwl-trans.h"
struct iwl_rs_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_vht_siso;
+ u8 plcp_vht_mimo2;
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
};
@@ -83,35 +85,52 @@ enum {
#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-/* uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for HT/VHT bit rates */
enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_MIMO3_6M_PLCP = 0x10,
- IWL_RATE_MIMO3_12M_PLCP = 0x11,
- IWL_RATE_MIMO3_18M_PLCP = 0x12,
- IWL_RATE_MIMO3_24M_PLCP = 0x13,
- IWL_RATE_MIMO3_36M_PLCP = 0x14,
- IWL_RATE_MIMO3_48M_PLCP = 0x15,
- IWL_RATE_MIMO3_54M_PLCP = 0x16,
- IWL_RATE_MIMO3_60M_PLCP = 0x17,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+ IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+ IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+ IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+ IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+ IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+ IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+ IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+ IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+ IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+ IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+ IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+ IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+ IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+ IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+ IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+ IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+ IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+ IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+ IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+ IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
};
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
@@ -139,25 +158,33 @@ enum {
#define IWL_RATE_DECREASE_TH 1920 /* 15% */
/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2 3
+enum {
+ IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_SWITCH_SISO,
+ IWL_LEGACY_SWITCH_MIMO2,
+ IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
+ IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
+};
/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2 2
-#define IWL_SISO_SWITCH_GI 3
+enum {
+ IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_SWITCH_MIMO2,
+ IWL_SISO_SWITCH_GI,
+ IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
+ IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
+};
/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_GI 4
+enum {
+ IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_SWITCH_SISO_B,
+ IWL_MIMO2_SWITCH_GI,
+ IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
+ IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
+};
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
#define IWL_ACTION_LIMIT 3 /* # possible actions */
@@ -188,20 +215,31 @@ enum {
enum iwl_table_type {
LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
+ LQ_LEGACY_G, /* legacy types */
+ LQ_LEGACY_A,
+ LQ_HT_SISO, /* HT types */
+ LQ_HT_MIMO2,
+ LQ_VHT_SISO, /* VHT types */
+ LQ_VHT_MIMO2,
LQ_MAX,
};
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) is_mimo2(tbl)
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
+#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
+#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
+#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
+#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
+#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
+#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
+#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
+#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
+#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
+
+#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -232,7 +270,7 @@ struct iwl_scale_tbl_info {
enum iwl_table_type lq_type;
u8 ant_type;
u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
+ u32 bw; /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
 	u8 max_search;	/* maximum number of tables we can search */
s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@ struct iwl_lq_sta {
u64 flush_timer; /* time staying in mode before new search */
u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
+ bool is_vht;
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -314,9 +352,8 @@ static inline u8 num_of_ant(u8 mask)
}
/* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum ieee80211_band band);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +365,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
* ieee80211_register_hw
*
*/
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +373,7 @@ extern int iwl_mvm_rate_control_register(void);
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
*/
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 2a8cb5a60535..3a1f3982109d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -300,10 +300,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
- return 0;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
@@ -422,6 +426,27 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->bf_data.ave_beacon_signal = sig;
+ /* BT Coex */
+ if (mvmvif->bf_data.bt_coex_min_thold !=
+ mvmvif->bf_data.bt_coex_max_thold) {
+ last_event = mvmvif->bf_data.last_bt_coex_event;
+ if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+ (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+ } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+ (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+ }
+ }
+
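The threshold handling above is a simple hysteresis; a standalone sketch of
the idea (hypothetical names, not the driver's API):

/* An event fires only when the signal crosses one threshold after having
 * last fired on the other side, so values bouncing inside
 * [min_thold, max_thold] do not generate a storm of events.
 */
enum rssi_event { RSSI_NONE, RSSI_LOW, RSSI_HIGH };

static enum rssi_event rssi_hysteresis(int sig, int min_thold, int max_thold,
				       int *last_event)
{
	if (sig > max_thold && (*last_event <= min_thold || *last_event == 0)) {
		*last_event = sig;
		return RSSI_HIGH;
	}
	if (sig < min_thold && (*last_event >= max_thold || *last_event == 0)) {
		*last_event = sig;
		return RSSI_LOW;
	}
	return RSSI_NONE;
}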
if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 621fb71f282a..dff7592e1ff8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -74,8 +74,12 @@
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
u16 rx_chain;
- u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+ u8 rx_ant;
+ if (mvm->scan_rx_ant != ANT_NONE)
+ rx_ant = mvm->scan_rx_ant;
+ else
+ rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
{
- if (vif->bss_conf.assoc)
- return cpu_to_le32(vif->bss_conf.beacon_int);
- else
+ if (!vif->bss_conf.assoc)
return 0;
+
+ return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
}
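For reference, ieee80211_tu_to_usec() boils down to the standard 802.11
definition of a time unit; a minimal sketch of the conversion used above:

/* One TU is 1024 us, so a typical beacon interval of 100 TU becomes
 * 102400 us for the firmware.
 */
static inline unsigned int tu_to_usec(unsigned int tu)
{
	return tu * 1024;
}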
static inline __le32
@@ -133,11 +137,12 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
* request.
*/
static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ int first)
{
int fw_idx, req_idx;
- for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
@@ -153,9 +158,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). The first SSID is already
- * included in the probe template, so we need to set only req->n_ssids - 1 bits
- * in addition to the first bit.
+ * one for each SSID, and set the active bit (first). If the first SSID is
+ * already included in the probe template, we only need to set
+ * req->n_ssids - 1 bits in addition to the first bit.
*/
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -170,7 +175,8 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
}
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
- struct cfg80211_scan_request *req)
+ struct cfg80211_scan_request *req,
+ bool basic_ssid)
{
u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
+ int type = BIT(req->n_ssids) - 1;
+
+ if (!basic_ssid)
+ type |= BIT(req->n_ssids);
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
- chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
+ chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
@@ -268,6 +278,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
+ bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ if (basic_ssid) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
+ }
} else {
cmd->passive2active = 0;
cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
- iwl_mvm_scan_fill_ssids(cmd, req);
+ iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
- iwl_mvm_scan_fill_channels(cmd, req);
+ iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+ if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ }
+
+ return 0;
+}
+
static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@@ -447,3 +476,406 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
}
+
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted");
+
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_sched_scan_stopped(mvm->hw);
+
+ return 0;
+}
+
+static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sched_scan_ies *ies,
+ enum ieee80211_band band,
+ struct iwl_tx_cmd *cmd,
+ u8 *data)
+{
+ u16 cmd_len;
+
+ cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ cmd->sta_id = mvm->aux_sta.sta_id;
+
+ cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
+
+ cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
+ vif->addr,
+ 1, NULL, 0,
+ ies->ie[band], ies->len[band],
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ cmd->len = cpu_to_le16(cmd_len);
+}
+
+static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan)
+{
+ scan->channel_count =
+ mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+ scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
+ scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+ scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
+ scan->rep_count = cpu_to_le32(1);
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ int i;
+
+ for (i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ break;
+ if (ssid_list[i].len == ssid_len &&
+		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+ return -1;
+}
+
+static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_offload_cmd *scan,
+ u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+
+ /*
+	 * Copy SSIDs from the match list.
+	 * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs
+	 * to configure the match list.
+ */
+ for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+ scan->direct_scan[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ *ssid_bitmap = 0;
+ for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
+ index = iwl_ssid_exist(req->ssids[j].ssid,
+ req->ssids[j].ssid_len,
+ scan->direct_scan);
+ if (index < 0) {
+ if (!req->ssids[j].ssid_len)
+ continue;
+ scan->direct_scan[i].id = WLAN_EID_SSID;
+ scan->direct_scan[i].len = req->ssids[j].ssid_len;
+ memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
+ scan->direct_scan[i].len);
+ *ssid_bitmap |= BIT(i + 1);
+ i++;
+ } else {
+ *ssid_bitmap |= BIT(index + 1);
+ }
+ }
+}
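A worked example of the resulting bitmap (hypothetical SSIDs; bit 0 is
assumed to stay reserved for the broadcast probe, hence the index + 1
offset):

/* Match sets ["home", "office"], scan SSIDs ["office", "guest"]:
 * direct_scan ends up as [home, office, guest] and only the SSIDs that
 * were actually requested for scanning are marked.
 */
u32 ssid_bitmap = BIT(1 + 1) |	/* "office" already present at index 1 */
		  BIT(2 + 1);	/* "guest" appended at index 2 */
/* ssid_bitmap == 0x0c */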
+
+static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req,
+ struct iwl_scan_channel_cfg *channels,
+ enum ieee80211_band band,
+ int *head, int *tail,
+ u32 ssid_bitmap)
+{
+ struct ieee80211_supported_band *s_band;
+ int n_probes = req->n_ssids;
+ int n_channels = req->n_channels;
+ u8 active_dwell, passive_dwell;
+ int i, j, index = 0;
+ bool partial;
+
+ /*
+	 * We have to configure all supported channels, even those we don't
+	 * want to scan, but the channels must be sent in the order we want
+	 * to scan them. So add the requested channels to the head of the
+	 * list and the others to the end.
+ */
+ active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
+ passive_dwell = iwl_mvm_get_passive_dwell(band);
+ s_band = &mvm->nvm_data->bands[band];
+
+ for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
+ partial = false;
+ for (j = 0; j < n_channels; j++)
+ if (s_band->channels[i].center_freq ==
+ req->channels[j]->center_freq) {
+ index = *head;
+ (*head)++;
+ /*
+				/*
+				 * Channels that came with the request will be
+				 * part of the partial scan.
+ */
+ partial = true;
+ break;
+ }
+ if (!partial) {
+ index = *tail;
+ (*tail)--;
+ }
+ channels->channel_number[index] =
+ cpu_to_le16(ieee80211_frequency_to_channel(
+ s_band->channels[i].center_freq));
+ channels->dwell_time[index][0] = active_dwell;
+ channels->dwell_time[index][1] = passive_dwell;
+
+ channels->iter_count[index] = cpu_to_le16(1);
+ channels->iter_interval[index] = 0;
+
+ if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
+
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
+ if (partial)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+
+ if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+ channels->type[index] |=
+ cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
+
+ /* scan for all SSIDs from req->ssids */
+ channels->type[index] |= cpu_to_le32(ssid_bitmap);
+ }
+}
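The head/tail placement above can be shown with plain arrays (a sketch with
hypothetical types, not the firmware channel structures):

/* Requested channels fill the output from the front, the remaining
 * supported channels fill it from the back, so the firmware still sees
 * every channel but scans the requested ones first.
 */
static void place_channels(const int *supported, int n_supported,
			   const int *requested, int n_requested,
			   int *out, int head, int tail)
{
	int i, j;

	for (i = 0; i < n_supported && head <= tail; i++) {
		int is_requested = 0;

		for (j = 0; j < n_requested; j++)
			if (supported[i] == requested[j]) {
				is_requested = 1;
				break;
			}

		if (is_requested)
			out[head++] = supported[i];
		else
			out[tail--] = supported[i];
	}
}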
+
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies)
+{
+ int supported_bands = 0;
+ int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
+ int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ int head = 0;
+ int tail = band_2ghz + band_5ghz;
+ u32 ssid_bitmap;
+ int cmd_len;
+ int ret;
+
+ struct iwl_scan_offload_cfg *scan_cfg;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_CONFIG_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (band_2ghz)
+ supported_bands++;
+ if (band_5ghz)
+ supported_bands++;
+
+ cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+ supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+ scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
+ if (!scan_cfg)
+ return -ENOMEM;
+
+ iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+ scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
+
+ iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+ /* build tx frames for supported bands */
+ if (band_2ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_2GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[0],
+ scan_cfg->data);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_2GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+ if (band_5ghz) {
+ iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+ IEEE80211_BAND_5GHZ,
+ &scan_cfg->scan_cmd.tx_cmd[1],
+ scan_cfg->data +
+ SCAN_OFFLOAD_PROBE_REQ_SIZE);
+ iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+ IEEE80211_BAND_5GHZ, &head, &tail,
+ ssid_bitmap);
+ }
+
+ cmd.data[0] = scan_cfg;
+ cmd.len[0] = cmd_len;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(scan_cfg);
+ return ret;
+}
+
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .flags = CMD_SYNC,
+ .len[1] = sizeof(*profile_cfg),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int blacklist_len;
+ int i;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ return -EIO;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+ blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+ else
+ blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+ blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+ if (!blacklist)
+ return -ENOMEM;
+
+ profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+ if (!profile_cfg) {
+ ret = -ENOMEM;
+ goto free_blacklist;
+ }
+
+ cmd.data[0] = blacklist;
+ cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+ cmd.data[1] = profile_cfg;
+
+ /* No blacklist configuration */
+
+ profile_cfg->num_profiles = req->n_match_sets;
+ profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+ profile->ssid_index = i;
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = 0xff;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(profile_cfg);
+free_blacklist:
+ kfree(blacklist);
+
+ return ret;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_req scan_req = {
+ .watchdog = IWL_SCHED_SCAN_WATCHDOG,
+
+ .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
+ .schedule_line[0].delay = req->interval / 1000,
+ .schedule_line[0].full_scan_mul = 1,
+
+ .schedule_line[1].iterations = 0xff,
+ .schedule_line[1].delay = req->interval / 1000,
+ .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
+ };
+
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, filter len %d\n",
+ req->n_match_sets);
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
+ } else {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending Scheduled scan without filtering\n");
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+ sizeof(scan_req), &scan_req);
+}
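+
The two schedule lines above implement a fast-then-slow cadence; a rough
sketch of the assumed semantics (names and values hypothetical; the interval
comes from cfg80211 in milliseconds while the firmware takes seconds):

struct sched_line {
	unsigned short iterations;
	unsigned short delay_s;
	unsigned short full_scan_mul;
};

static void fill_schedule(struct sched_line line[2], unsigned int interval_ms)
{
	line[0].iterations = 3;			/* a few fast iterations first */
	line[0].delay_s = interval_ms / 1000;
	line[0].full_scan_mul = 1;		/* every iteration is a full scan */

	line[1].iterations = 0xff;		/* then repeat until stopped */
	line[1].delay_s = interval_ms / 1000;
	line[1].full_scan_mul = 8;		/* only every 8th is a full scan */
}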
+
+static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_ABORT_CMD,
+ .flags = CMD_SYNC,
+ };
+ u32 status;
+
+	/* Exit immediately with an error if the device is not ready to
+	 * receive the scan abort command or is not currently running a
+	 * scheduled scan */
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+ return -EIO;
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+ if (ret)
+ return ret;
+
+ if (status != CAN_ABORT_STATUS) {
+ /*
+ * The scan abort will return 1 for success or
+		 * 2 for "failure". A failure condition can be due
+		 * simply to not being in an active scan, which can
+		 * occur if we send the scan abort before the
+		 * microcode has notified us that a scan has completed.
+ */
+ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
+ IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+ return;
+ }
+
+ ret = iwl_mvm_send_sched_scan_abort(mvm);
+ if (ret)
+ IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+ else
+ IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 44add291531b..329952363a54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,6 +66,115 @@
#include "sta.h"
#include "rs.h"
+static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+ struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+ memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+ cmd_v5->add_modify = cmd_v6->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v6->sta_id;
+ cmd_v5->modify_mask = cmd_v6->modify_mask;
+ cmd_v5->station_flags = cmd_v6->station_flags;
+ cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v6->assoc_id;
+ cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+static void
+iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
+ struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
+ u32 mac_id_n_color)
+{
+ memset(sta_cmd, 0, sizeof(*sta_cmd));
+
+ sta_cmd->sta_id = key_cmd->sta_id;
+ sta_cmd->add_modify = STA_MODE_MODIFY;
+ sta_cmd->modify_mask = STA_MODIFY_KEY;
+ sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
+
+ sta_cmd->key.key_offset = key_cmd->key_offset;
+ sta_cmd->key.key_flags = key_cmd->key_flags;
+ memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
+ sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
+ memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
+ sizeof(sta_cmd->key.tkip_rx_ttak));
+}
+
+static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
+ cmd, status);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
+ &cmd_v5, status);
+}
+
+static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
+ struct iwl_mvm_add_sta_cmd_v6 *cmd)
+{
+ struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
+ &cmd_v5);
+}
+
+static int
+iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color,
+ int *status)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
+ sizeof(*cmd), cmd, status);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
+ &sta_cmd, status);
+}
+
+static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
+ u32 flags,
+ struct iwl_mvm_add_sta_key_cmd *cmd,
+ u32 mac_id_n_color)
+{
+ struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+ return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
+ &sta_cmd);
+}
+
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
{
int sta_id;
@@ -87,7 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
- &add_sta_cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
if (ret)
return ret;
@@ -229,8 +337,12 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
- /* for HW restart - need to reset the seq_number etc... */
- memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+ /* for HW restart - reset everything but the sequence number */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_sta->tid_data[i].seq_number;
+ memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+ mvm_sta->tid_data[i].seq_number = seq;
+ }
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
@@ -256,7 +368,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -269,8 +381,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -469,13 +580,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd cmd;
+ struct iwl_mvm_add_sta_cmd_v6 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -485,8 +596,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -534,10 +644,14 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *bsta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ static const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = vif->bss_conf.bssid;
+
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
@@ -614,7 +728,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -638,8 +752,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -674,7 +787,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {};
int ret;
u32 status;
@@ -696,8 +809,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -743,13 +855,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
- txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+ for (txq_id = mvm->first_agg_queue;
+ txq_id <= mvm->last_agg_queue; txq_id++)
if (mvm->queue_to_mac80211[txq_id] ==
IWL_INVALID_MAC80211_QUEUE)
break;
- if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+ if (txq_id > mvm->last_agg_queue) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
return -EIO;
}
@@ -987,10 +1099,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 cmd_flags)
{
__le16 key_flags;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
int ret, status;
u16 keyidx;
int i;
+ u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
- cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ cmd.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
- cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
- memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
default:
WARN_ON(1);
@@ -1017,20 +1130,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_offset = keyconf->hw_key_idx;
- cmd.key.key_flags = key_flags;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_KEY;
+ cmd.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mac_id_n_color,
+ &status);
else
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
+ mac_id_n_color);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_sta *mvm_sta;
- struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
int ret, status;
u8 sta_id;
@@ -1252,17 +1363,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.key.key_flags = key_flags;
- cmd.key.key_offset = keyconf->hw_key_idx;
+ cmd.key_flags = key_flags;
+ cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
- cmd.modify_mask = STA_MODIFY_KEY;
- cmd.add_modify = STA_MODE_MODIFY;
-
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+ mvm_sta->mac_id_n_color,
+ &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1331,7 +1439,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd cmd = {
+ struct iwl_mvm_add_sta_cmd_v6 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
int ret;
/* TODO: somehow the fw doesn't seem to take PS_POLL into account */
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 94b265eb32b8..4dfc359a4bdd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -293,10 +293,6 @@ struct iwl_mvm_sta {
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
-#ifdef CONFIG_PM_SLEEP
- u16 last_seq_ctl;
-#endif
-
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000000..eb74391d91ca
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 76a3c177e100..33cf56fdfc41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -387,7 +387,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration)
+ u32 duration, u32 min_duration,
+ u32 max_delay)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
time_cmd.max_frags = TE_V2_FRAG_NONE;
- time_cmd.max_delay = cpu_to_le32(500);
+ time_cmd.max_delay = cpu_to_le32(max_delay);
/* TODO: why do we need to interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1);
time_cmd.duration = cpu_to_le32(duration);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index f86c51065ed3..d9c8d6cfa2db 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -123,6 +123,7 @@
* @duration: the duration of the session in TU.
* @min_duration: will start a new session if the current session will end
* in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
*
* This function can be used to start a session protection which means that the
* fw will stay on the channel for %duration_ms milliseconds. This function
@@ -133,7 +134,8 @@
*/
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration);
+ u32 duration, u32 min_duration,
+ u32 max_delay);
/**
* iwl_mvm_stop_session_protection - cancel the session protection.
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e05440d90319..43d97c33a75a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+ if (txq_id < mvm->first_agg_queue)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -511,16 +511,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-/**
- * translate ucode response to mac80211 tx status control values
- */
-static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
- struct ieee80211_tx_info *info)
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct ieee80211_tx_rate *r)
{
- struct ieee80211_tx_rate *r = &info->status.rates[0];
-
- info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@ static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
r->flags |= IEEE80211_TX_RC_VHT_MCS;
} else {
r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- info->band);
+ band);
}
}
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -602,11 +609,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
}
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+ info);
/* Single frame failure in an AMPDU queue => send BAR */
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
+ if (txq_id >= mvm->first_agg_queue &&
!(info->flags & IEEE80211_TX_STAT_ACK))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -619,7 +626,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
ieee80211_tx_status_ni(mvm->hw, skb);
}
- if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
+ if (txq_id >= mvm->first_agg_queue) {
/* If this is an aggregation queue, we use the ssn since:
* ssn = wifi seq_num % 256.
* The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
-
-#ifdef CONFIG_PM_SLEEP
- mvmsta->last_seq_ctl = seq_ctl;
-#endif
} else {
sta = NULL;
mvmsta = NULL;
@@ -681,7 +684,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+ if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
if (mvmsta) {
/*
@@ -777,7 +780,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct ieee80211_sta *sta;
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
+ if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
- info);
+ iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
+ info);
}
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a9c357491434..ed69e9b78e82 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -466,7 +466,7 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
len = img->sec[IWL_UCODE_SECTION_DATA].len;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 26108a1a29fa..941c0c88f982 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -268,7 +268,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
@@ -350,6 +350,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -391,7 +394,6 @@ out_free_drv:
iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -402,8 +404,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_drv_stop(trans_pcie->drv);
iwl_trans_pcie_free(trans);
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index c3f904d422b0..5d9337bec67a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -220,6 +220,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+
set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
out:
@@ -443,22 +446,138 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
+static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+{
+ int shift_param;
+ u32 address;
+ int ret = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ } else {
+ shift_param = 16;
+ address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ }
+
+ /* set CPU to started */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_STARTED << shift_param,
+ 1);
+
+ /* set last complete descriptor number */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set last loaded block */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
+ << shift_param,
+ 1);
+
+ /* image loading complete */
+ iwl_trans_set_bits_mask(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ CSR_CPU_STATUS_LOADING_COMPLETED
+ << shift_param,
+ 1);
+
+ /* set FH_TCSR_0_REG */
+ iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+
+ /* verify image verification started */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+ CSR_SECURE_TIME_OUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "secure boot process didn't start\n");
+ return ret;
+ }
+
+ /* wait for image verification to complete */
+ ret = iwl_poll_bit(trans, address,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+ CSR_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ IWL_DEBUG_FW(trans,
+ "working with %s image\n",
+ image->is_secure ? "Secured" : "Non Secured");
+ IWL_DEBUG_FW(trans,
+ "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ if (image->is_secure) {
+ /* set secure boot inspector addresses */
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
+ iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
+
+ /* release CPU1 reset if secure inspector image burned in OTP */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ /* load to FW the binary sections of CPU1 */
+ IWL_DEBUG_INFO(trans, "Loading CPU1\n");
+ for (i = 0;
+ i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i++) {
if (!image->sec[i].data)
break;
-
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
+ /* configure the ucode to start secure process on CPU1 */
+ if (image->is_secure) {
+ /* config CPU1 to start secure protocol */
+ ret = iwl_pcie_secure_set(trans, 1);
+ if (ret)
+ return ret;
+ } else {
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(trans, CSR_RESET, 0);
+ }
+
+ if (image->is_dual_cpus) {
+ /* load to FW the binary sections of CPU2 */
+ IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
+ for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+ i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].data)
+ break;
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (image->is_secure) {
+ /* set CPU2 for secure protocol */
+ ret = iwl_pcie_secure_set(trans, 2);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
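The new iwl_pcie_secure_set() above flags each CPU's load status and then waits, via iwl_poll_bit(), first for image verification to start and then for it to complete. A minimal sketch of that poll-until-bit-set pattern under stated assumptions (the register pointer, mask and millisecond timeout are placeholders, not the driver's CSR layout):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll an MMIO status register until (reg & mask) == expected or the
 * timeout expires; roughly the shape of the verification waits above. */
static int poll_status_bit(void __iomem *reg, u32 mask, u32 expected,
			   unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if ((readl(reg) & mask) == expected)
			return 0;
		usleep_range(1000, 2000);	/* ~1 ms per iteration */
	}
	return -ETIMEDOUT;
}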
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 1424335163b9..059c5acad3a0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1465,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_unlock_bh(&txq->lock);
}
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1493,16 +1494,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
+ int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))) {
- IWL_ERR(trans, "Command %s: a command is already active!\n",
- get_cmd_string(trans_pcie, cmd->id));
+ if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ "Command %s: a command is already active!\n",
+ get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
- }
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1517,10 +1518,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
- HOST_COMPLETE_TIMEOUT);
+ while (timeout > 0) {
+ unsigned long flags;
+
+ timeout -= COMMAND_POKE_TIMEOUT;
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ COMMAND_POKE_TIMEOUT);
+ if (ret)
+ break;
+ /* poke the device - it may have lost the command */
+ if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+ iwl_trans_release_nic_access(trans, &flags);
+ IWL_DEBUG_INFO(trans,
+ "Tried to wake NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ } else {
+ IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ break;
+ }
+ }
+
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
struct iwl_txq *txq =
@@ -1541,6 +1561,9 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
"Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
+
+ iwl_op_mode_nic_error(trans->op_mode);
+
goto cancel;
}
}
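The synchronous host-command path above no longer sleeps for the full two seconds in one go: it waits in COMMAND_POKE_TIMEOUT slices and, between slices, grabs NIC access so a device that went to sleep is woken and can still answer; on a final timeout it now also reports a NIC error. A minimal sketch of that chunked wait, with hypothetical cmd_done()/wake_device() callbacks standing in for the driver's status bit and NIC-access helpers:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define TOTAL_TIMEOUT	(2 * HZ)
#define POKE_INTERVAL	(HZ / 10)

static int wait_with_poke(wait_queue_head_t *wq, bool (*cmd_done)(void *),
			  bool (*wake_device)(void *), void *ctx)
{
	long timeout = TOTAL_TIMEOUT;

	while (timeout > 0) {
		timeout -= POKE_INTERVAL;
		/* Wait one slice; non-zero return means cmd_done() became true. */
		if (wait_event_timeout(*wq, cmd_done(ctx), POKE_INTERVAL))
			return 0;
		/* Slice expired: poke the device so it does not sleep through
		 * the command. Give up if it cannot be woken at all. */
		if (!wake_device(ctx))
			break;
	}
	return -ETIMEDOUT;
}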
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index c0f9e7e862f6..51b92b5df119 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -53,6 +53,11 @@ static void main_firmware_cb(const struct firmware *firmware, void *context)
/* Firmware found! */
lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+ if (priv->helper_fw) {
+ release_firmware (priv->helper_fw);
+ priv->helper_fw = NULL;
+ }
+ release_firmware (firmware);
}
static void helper_firmware_cb(const struct firmware *firmware, void *context)
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index c94dd6802672..ef8c98e21098 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -754,14 +754,14 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
if (ret == 0 && (card->model != MODEL_8305))
ret = if_cs_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
/* Now actually get the IRQ */
ret = request_irq(card->p_dev->irq, if_cs_interrupt,
IRQF_SHARED, DRV_NAME, card);
if (ret) {
pr_err("error in request_irq\n");
- goto out;
+ return;
}
/*
@@ -777,10 +777,6 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
pr_err("could not activate card\n");
free_irq(card->p_dev->irq, card);
}
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 45578335e420..991238afd1b6 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -708,20 +708,16 @@ static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
ret = if_sdio_prog_helper(card, helper);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Helper firmware loaded\n");
ret = if_sdio_prog_real(card, mainfw);
if (ret)
- goto out;
+ return;
lbs_deb_sdio("Firmware loaded\n");
if_sdio_finish_power_on(card);
-
-out:
- release_firmware(helper);
- release_firmware(mainfw);
}
static int if_sdio_prog_firmware(struct if_sdio_card *card)
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 4bb6574f4073..83669151bb82 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1094,11 +1094,7 @@ static int if_spi_init_card(struct if_spi_card *card)
goto out;
out:
- release_firmware(helper);
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
-
return err;
}
@@ -1128,7 +1124,7 @@ static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
lbs_deb_enter(LBS_DEB_SPI);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 27980778d992..dff08a2896a3 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -844,7 +844,7 @@ static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
cardp->fw = fw;
if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
ret = -EINVAL;
- goto release_fw;
+ goto done;
}
/* Cancel any pending usb business */
@@ -861,7 +861,7 @@ restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@ restart:
usb_kill_urb(cardp->tx_urb);
if (if_usb_submit_rx_urb(cardp) < 0)
ret = -EIO;
- goto release_fw;
+ goto done;
} else if (cardp->bootcmdresp <= 0) {
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
ret = -EIO;
- goto release_fw;
+ goto done;
}
i = 0;
@@ -921,14 +921,14 @@ restart:
pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -EIO;
- goto release_fw;
+ goto done;
}
cardp->priv->fw_ready = 1;
if_usb_submit_rx_urb(cardp);
if (lbs_start_card(priv))
- goto release_fw;
+ goto done;
if_usb_setup_firmware(priv);
@@ -939,11 +939,8 @@ restart:
if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
priv->ehs_remove_supported = false;
- release_fw:
- release_firmware(cardp->fw);
- cardp->fw = NULL;
-
done:
+ cardp->fw = NULL;
lbs_deb_leave(LBS_DEB_USB);
}
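Taken together, the libertas hunks above change who owns the firmware images: the per-bus loaders (if_cs, if_sdio, if_spi, if_usb) stop calling release_firmware() themselves, and the common completion path in firmware.c frees both helper and main image in one place, so the error paths can neither double-free nor leak. A minimal sketch of that single-owner shape, assuming an asynchronous request; load_into_device() is a hypothetical stand-in for the actual download step:

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>

/* Hypothetical download step; a real driver writes to its hardware here. */
static void load_into_device(struct device *dev, const u8 *data, size_t len)
{
}

static void fw_ready_cb(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw)
		return;		/* image not found; nothing to free */

	load_into_device(dev, fw->data, fw->size);
	release_firmware(fw);	/* the one and only owner frees it here */
}

static int start_fw_load(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, true, "example-fw.bin",
				       dev, GFP_KERNEL, dev, fw_ready_cb);
}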
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2cd3f54e1efa..de0df86704e7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -167,6 +167,7 @@ struct hwsim_vif_priv {
u32 magic;
u8 bssid[ETH_ALEN];
bool assoc;
+ bool bcn_en;
u16 aid;
};
@@ -1170,6 +1171,16 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
*total_flags = data->rx_filter;
}
+static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned int *count = data;
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ if (vp->bcn_en)
+ (*count)++;
+}
+
static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -1180,7 +1191,8 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
hwsim_check_magic(vif);
- wiphy_debug(hw->wiphy, "%s(changed=0x%x)\n", __func__, changed);
+ wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+ __func__, changed, vif->addr);
if (changed & BSS_CHANGED_BSSID) {
wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
@@ -1202,6 +1214,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
wiphy_debug(hw->wiphy, " BCN EN: %d\n", info->enable_beacon);
+ vp->bcn_en = info->enable_beacon;
if (data->started &&
!hrtimer_is_queued(&data->beacon_timer.timer) &&
info->enable_beacon) {
@@ -1215,8 +1228,16 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
tasklet_hrtimer_start(&data->beacon_timer,
ns_to_ktime(until_tbtt * 1000),
HRTIMER_MODE_REL);
- } else if (!info->enable_beacon)
- tasklet_hrtimer_cancel(&data->beacon_timer);
+ } else if (!info->enable_beacon) {
+ unsigned int count = 0;
+ ieee80211_iterate_active_interfaces(
+ data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_bcn_en_iter, &count);
+ wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
+ count);
+ if (count == 0)
+ tasklet_hrtimer_cancel(&data->beacon_timer);
+ }
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
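With multiple virtual interfaces sharing one hwsim beacon timer, the change above records per-vif beaconing state (bcn_en) and only cancels the timer once no interface is still beaconing, using an interface iterator to count them. A minimal sketch of that count-then-decide pattern; the private struct here is illustrative, only the iterator call mirrors the driver:

#include <net/mac80211.h>

struct example_vif_priv {
	bool bcn_en;		/* updated on BSS_CHANGED_BEACON_ENABLED */
};

static void count_beaconing_cb(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	unsigned int *count = data;
	struct example_vif_priv *vp = (void *)vif->drv_priv;

	if (vp->bcn_en)
		(*count)++;
}

/* True when no interface is beaconing any more, i.e. the shared beacon
 * timer may be stopped without starving another vif. */
static bool no_vif_beaconing(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
					    count_beaconing_cb, &count);
	return count == 0;
}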
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index a6c46f3b6e3a..e47f4e3012b8 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- uint16_t cancel_scan_cmd = false;
+ bool cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 37f873bb342f..4e4686e6ac09 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
int ret = 0;
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
- u8 enable_data = true;
+ bool enable_data = true;
u16 cap_info, status_code;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index c2b91f566e05..9d7c9d354d34 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -882,7 +882,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!adapter->workqueue)
goto err_kmalloc;
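Replacing create_workqueue() with alloc_workqueue() above makes the queue's properties explicit: WQ_MEM_RECLAIM guarantees forward progress under memory pressure, WQ_HIGHPRI and WQ_UNBOUND affect scheduling, and max_active = 1 keeps work items serialized. A minimal usage sketch with illustrative names:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* deferred processing runs here, on the dedicated queue */
}

static int example_wq_setup(void)
{
	example_wq = alloc_workqueue("example_wq",
				     WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND,
				     1);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_wq_teardown(void)
{
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}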
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 52da8ee7599a..33fa9432b241 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
}
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);
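The mwifiex PCIe hunks above only drop casts: pci_get_drvdata() returns void *, which converts implicitly to any object pointer type in C, so the explicit (struct pcie_service_card *) cast adds nothing. A one-line illustration, with a placeholder private type:

#include <linux/pci.h>

struct example_card;	/* stand-in for the driver's private structure */

static struct example_card *card_from_pdev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);	/* no cast needed from void * */
}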
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c0268b597748..7d66018a2e33 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
- u16 hs_activate = false;
+ bool hs_activate = false;
if (!hscfg_param)
/* New Activate command */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 95fa3599b407..5dd0ccc70b86 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
- int valid = true;
+ bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 644d6e0c51cc..0f129d498fb1 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
}
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
- struct sk_buff *skb);
+ struct sk_buff *skb);
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ra,
- int tid);
+ struct mwifiex_ra_list_tbl *ra, int tid);
int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list, int tid);
u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
- const struct sk_buff *skb);
+ const struct sk_buff *skb);
void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
- u8 **assoc_buf,
- struct ieee_types_wmm_parameter
- *wmmie,
- struct ieee80211_ht_cap
- *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+ u8 **assoc_buf,
+ struct ieee_types_wmm_parameter *wmmie,
+ struct ieee80211_ht_cap *htcap);
void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
- struct ieee_types_wmm_parameter
- *wmm_ie);
+ struct ieee_types_wmm_parameter *wmm_ie);
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
- const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+ const struct host_cmd_ds_command *resp);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a3707fd4ef62..b953ad621e0b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -6093,7 +6093,6 @@ err_iounmap:
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 3bb936b9558c..eebd2be21ee9 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -182,23 +182,20 @@ extern int orinoco_debug;
/* Exported prototypes */
/********************************************************************/
-extern struct orinoco_private *alloc_orinocodev(
- int sizeof_card, struct device *device,
- int (*hard_reset)(struct orinoco_private *),
- int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
- unsigned long base_addr,
- unsigned int irq,
- const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+ int (*hard_reset)(struct orinoco_private *),
+ int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+ unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
int orinoco_process_xmit_skb(struct sk_buff *skb,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index d73fdf6185a2..ffb2469eb679 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 677bf14eca84..5ae1191d2532 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2559dbd6184b..bbd36d1676ff 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 42afeeea2c40..04b08de5fd5d 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
- pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->bridge_io);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 57e3af8ebb4b..f9a07b0d83ac 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7fc46f26cf2b..de15171e2cd8 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
gpio_direction_input(p54spi_gpio_irq);
ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
- p54spi_interrupt, IRQF_DISABLED, "p54spi",
+ p54spi_interrupt, 0, "p54spi",
priv->spi);
if (ret < 0) {
dev_err(&priv->spi->dev, "request_irq() failed");
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1c22b81e6ef3..8863a6cb2388 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
data = r.ptr;
/* copy this MAC to the bss */
- memcpy(bss.address, data, 6);
+ memcpy(bss.address, data, ETH_ALEN);
kfree(data);
/* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
return -EINVAL;
/* prepare the structure for the set object */
- memcpy(&bssid[0], awrq->sa_data, 6);
+ memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
/* set the bssid -- does this make sense when in AP mode? */
rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
- memcpy(awrq->sa_data, r.ptr, 6);
+ memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
kfree(r.ptr);
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
size_t wpa_ie_len;
/* The first entry must be the MAC address */
- memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+ memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
iwe.cmd = SIOCGIWAP;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
&((struct sockaddr *) addr)->sa_data);
if (!ret)
memcpy(priv->ndev->dev_addr,
- &((struct sockaddr *) addr)->sa_data, 6);
+ &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
return ret;
}
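The isl_ioctl.c hunks above (and the matching prism54 changes in the files that follow) replace the literal 6 with ETH_ALEN for every MAC-address copy, so the length stays tied to the address definition rather than a magic number. A minimal illustration:

#include <linux/if_ether.h>
#include <linux/string.h>

static void copy_mac(u8 *dst, const u8 *src)
{
	memcpy(dst, src, ETH_ALEN);	/* 6 octets, by definition */
}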
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5970ff6f40cc..41a16d30c79c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
/* ndev->set_multicast_list = &islpci_set_multicast_list; */
ndev->addr_len = ETH_ALEN;
/* Get a non-zero dummy MAC address for nameif. Jean II */
- memcpy(ndev->dev_addr, dummy_mac, 6);
+ memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index a01606b36e03..056af38e72e3 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
isl_oid[GEN_OID_MACADDRESS].size, &res);
if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
- memcpy(priv->ndev->dev_addr, res->data, 6);
+ memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
else
ret = -EIO;
if (res)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 68dbbb9c6d12..006b8bcb2e31 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -58,11 +58,11 @@ config RT61PCI
config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
- depends on PCI || SOC_RT288X || SOC_RT305X
+ depends on PCI
select RT2800_LIB
+ select RT2800_LIB_MMIO
select RT2X00_LIB_MMIO
- select RT2X00_LIB_PCI if PCI
- select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_PCI
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_CCITT
@@ -199,9 +199,30 @@ config RT2800USB_UNKNOWN
endif
+config RT2800SOC
+ tristate "Ralink WiSoC support"
+ depends on SOC_RT288X || SOC_RT305X
+ select RT2X00_LIB_SOC
+ select RT2X00_LIB_MMIO
+ select RT2X00_LIB_CRYPTO
+ select RT2X00_LIB_FIRMWARE
+ select RT2800_LIB
+ select RT2800_LIB_MMIO
+ ---help---
+ This adds support for Ralink WiSoC devices.
+ Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
+
+ When compiled as a module, this driver will be called rt2800soc.
+
+
config RT2800_LIB
tristate
+config RT2800_LIB_MMIO
+ tristate
+ select RT2X00_LIB_MMIO
+ select RT2800_LIB
+
config RT2X00_LIB_MMIO
tristate
@@ -219,6 +240,7 @@ config RT2X00_LIB_USB
config RT2X00_LIB
tristate
+ select AVERAGE
config RT2X00_LIB_FIRMWARE
boolean
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b67..24a66015a495 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
+obj-$(CONFIG_RT2800_LIB_MMIO) += rt2800mmio.o
obj-$(CONFIG_RT2400PCI) += rt2400pci.o
obj-$(CONFIG_RT2500PCI) += rt2500pci.o
obj-$(CONFIG_RT61PCI) += rt61pci.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_RT2800PCI) += rt2800pci.o
obj-$(CONFIG_RT2500USB) += rt2500usb.o
obj-$(CONFIG_RT73USB) += rt73usb.o
obj-$(CONFIG_RT2800USB) += rt2800usb.o
+obj-$(CONFIG_RT2800SOC) += rt2800soc.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3d53a09da5a1..38ed9a3e44c8 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
entry->queue->rt2x00dev->rssi_offset;
rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index fa33b5edf931..aab6b5e4f5dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -52,6 +52,7 @@
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5592 0x000f
+#define RF3070 0x3070
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370
@@ -122,7 +124,7 @@
/*
* MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
*/
-#define MAC_CSR0_3290 0x0000
+#define MAC_CSR0_3290 0x0000
/*
* E2PROM_CSR: PCI EEPROM control register.
@@ -211,17 +213,17 @@
/*
* COEX_CFG_0
*/
-#define COEX_CFG0 0x0040
+#define COEX_CFG0 0x0040
#define COEX_CFG_ANT FIELD32(0xff000000)
/*
* COEX_CFG_1
*/
-#define COEX_CFG1 0x0044
+#define COEX_CFG1 0x0044
/*
* COEX_CFG_2
*/
-#define COEX_CFG2 0x0048
+#define COEX_CFG2 0x0048
#define BT_COEX_CFG1 FIELD32(0xff000000)
#define BT_COEX_CFG0 FIELD32(0x00ff0000)
#define WL_COEX_CFG1 FIELD32(0x0000ff00)
@@ -235,8 +237,8 @@
#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
#define PLL_CONTROL FIELD32(0x00070000)
#define PLL_LPF_R1 FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
#define PLL_LOCK_CTRL FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
-#define RFCSR6_TXDIV FIELD8(0x0c)
+#define RFCSR6_TXDIV FIELD8(0x0c)
/* bits for RF3053 */
#define RFCSR6_VCO_IC FIELD8(0xc0)
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
-#define RFCSR12_DR0 FIELD8(0xe0)
+#define RFCSR12_DR0 FIELD8(0xe0)
/*
* RFCSR 13:
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
-#define RFCSR13_DR0 FIELD8(0xe0)
+#define RFCSR13_DR0 FIELD8(0xe0)
/*
* RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
-#define RFCSR17_CODE FIELD8(0x7f)
+#define RFCSR17_CODE FIELD8(0x7f)
/* RFCSR 18 */
#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
*/
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
/*
* EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
* DAC_TEST: 0: disable, 1: enable
*/
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
/*
* EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
-#define MCU_LED_AG_CONF 0x52
+#define MCU_LED_AG_CONF 0x52
#define MCU_LED_ACT_CONF 0x53
#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
-#define MCU_BAND_SELECT 0x91
+#define MCU_BAND_SELECT 0x91
/*
* MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88ce656f96cd..c5738f14c4ba 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
[EEPROM_LNA] = 0x0026,
[EEPROM_EXT_LNA2] = 0x0027,
[EEPROM_RSSI_BG] = 0x0028,
- [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
[EEPROM_RSSI_BG2] = 0x0029,
- [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
[EEPROM_RSSI_A] = 0x002a,
[EEPROM_RSSI_A2] = 0x002b,
- [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
[EEPROM_TXPOWER_BG1] = 0x0030,
[EEPROM_TXPOWER_BG2] = 0x0037,
[EEPROM_EXT_TXPOWER_BG3] = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_read(rt2x00dev, 3, &r3);
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2800_config_3572bt_ant(rt2x00dev);
/*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
else
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
break;
case 2:
if (rt2x00_rt(rt2x00dev, RT3572) &&
- test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
int idx = rf->channel-1;
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3070:
case RF5360:
case RF5370:
case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
- if (rt2x00_rf(rt2x00dev, RF3290) ||
+ if (rt2x00_rf(rt2x00dev, RF3070) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392)) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
- &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
rf->channel > 14);
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev))
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
else
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
- if (rt2x00_rt(rt2x00dev, RT3572))
+ if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + (2 * rt2x00dev->lna_gain);
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3593)) {
- if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
- /* Band selection. GPIO #8 controls all paths */
+ /* Band selection */
+ if (rt2x00_is_usb(rt2x00dev) ||
+ rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #8 controls all paths */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
else
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+ }
+ /* LNA PE control. */
+ if (rt2x00_is_usb(rt2x00dev)) {
+ /* GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
- /* LNA PE control.
- * GPIO #4 controls PE0 and PE1,
- * GPIO #7 controls PE2
- */
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
- rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ /* GPIO #4 controls PE0, PE1 and PE2 */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
}
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
/* AGC init */
if (rf->channel <= 14)
reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
{
int delta;
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_power_limit(rt2x00dev))
return 0;
/*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
- if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
* We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3572))
- vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
- if (rt2x00_rt(rt2x00dev, RT5592)) {
+ if (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+ vgc_level);
+ } else if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
- } else
+ } else {
rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ }
+
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
- /*
- * When RSSI is better then -80 increase VGC level with 0x10, except
- * for rt5592 chip.
+
+ /* When RSSI is better than a certain threshold, increase VGC
+ * with a chip-specific value in order to improve the balance
+ * between sensitivity and noise isolation.
*/
vgc = rt2800_get_default_vgc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
- vgc += 0x20;
- else if (qual->rssi > -80)
- vgc += 0x10;
+ switch (rt2x00dev->chip.rt) {
+ case RT3572:
+ case RT3593:
+ if (qual->rssi > -65) {
+ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ vgc += 0x20;
+ else
+ vgc += 0x10;
+ }
+ break;
+
+ case RT5592:
+ if (qual->rssi > -65)
+ vgc += 0x20;
+ break;
+
+ default:
+ if (qual->rssi > -80)
+ vgc += 0x10;
+ break;
+ }
rt2800_set_vgc(rt2x00dev, qual, vgc);
}
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
ant = (div_mode == 3) ? 1 : 0;
/* check if this is a Bluetooth combo card */
- if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
u32 reg;
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
- rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
- rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
- rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
rt2800_rf_init_calibration(rt2x00dev, 2);
rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
- rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
u16 word;
/*
- * Initialize all registers.
+ * Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
+ /*
+ * Wait for the BBP/RF to wake up.
+ */
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
- * Send signal to firmware during boot time.
+ * Send signal during boot time to initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
+ /*
+ * Make sure BBP is up and running.
+ */
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
+ /*
+ * Initialize BBP/RF registers.
+ */
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF3320:
case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
/*
* RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
*/
static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
-static const struct rf_channel rf_vals_3053[] = {
- /* Channel, N, R, K */
- {1, 241, 2, 2},
- {2, 241, 2, 7},
- {3, 242, 2, 2},
- {4, 242, 2, 7},
- {5, 243, 2, 2},
- {6, 243, 2, 7},
- {7, 244, 2, 2},
- {8, 244, 2, 7},
- {9, 245, 2, 2},
- {10, 245, 2, 7},
- {11, 246, 2, 2},
- {12, 246, 2, 7},
- {13, 247, 2, 2},
- {14, 248, 2, 4},
-
- {36, 0x56, 0, 4},
- {38, 0x56, 0, 6},
- {40, 0x56, 0, 8},
- {44, 0x57, 0, 0},
- {46, 0x57, 0, 2},
- {48, 0x57, 0, 4},
- {52, 0x57, 0, 8},
- {54, 0x57, 0, 10},
- {56, 0x58, 0, 0},
- {60, 0x58, 0, 4},
- {62, 0x58, 0, 6},
- {64, 0x58, 0, 8},
-
- {100, 0x5B, 0, 8},
- {102, 0x5B, 0, 10},
- {104, 0x5C, 0, 0},
- {108, 0x5C, 0, 4},
- {110, 0x5C, 0, 6},
- {112, 0x5C, 0, 8},
-
- /* NOTE: Channel 114 has been removed intentionally.
- * The EEPROM contains no TX power values for that,
- * and it is disabled in the vendor driver as well.
- */
-
- {116, 0x5D, 0, 0},
- {118, 0x5D, 0, 2},
- {120, 0x5D, 0, 4},
- {124, 0x5D, 0, 8},
- {126, 0x5D, 0, 10},
- {128, 0x5E, 0, 0},
- {132, 0x5E, 0, 4},
- {134, 0x5E, 0, 6},
- {136, 0x5E, 0, 8},
- {140, 0x5F, 0, 0},
-
- {149, 0x5F, 0, 9},
- {151, 0x5F, 0, 11},
- {153, 0x60, 0, 1},
- {157, 0x60, 0, 5},
- {159, 0x60, 0, 7},
- {161, 0x60, 0, 9},
- {165, 0x61, 0, 1},
- {167, 0x61, 0, 3},
- {169, 0x61, 0, 5},
- {171, 0x61, 0, 7},
- {173, 0x61, 0, 9},
-};
-
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7473,7 +7450,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power2;
char *default_power3;
unsigned int i;
- u16 eeprom;
u32 reg;
/*
@@ -7522,48 +7498,48 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-
/*
* Initialize hw_mode information.
*/
- spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720)) {
+ switch (rt2x00dev->chip.rf) {
+ case RF2720:
+ case RF2820:
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) ||
- rt2x00_rf(rt2x00dev, RF2750)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF2750:
+ case RF2850:
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3290) ||
- rt2x00_rf(rt2x00dev, RF3320) ||
- rt2x00_rf(rt2x00dev, RF3322) ||
- rt2x00_rf(rt2x00dev, RF5360) ||
- rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5372) ||
- rt2x00_rf(rt2x00dev, RF5390) ||
- rt2x00_rf(rt2x00dev, RF5392)) {
+ break;
+
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3070:
+ case RF3290:
+ case RF3320:
+ case RF3322:
+ case RF5360:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+ case RF5392:
spec->num_channels = 14;
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3052)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+
+ case RF3052:
+ case RF3053:
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
- } else if (rt2x00_rf(rt2x00dev, RF3053)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
- spec->num_channels = ARRAY_SIZE(rf_vals_3053);
- spec->channels = rf_vals_3053;
- } else if (rt2x00_rf(rt2x00dev, RF5592)) {
- spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ break;
+ case RF5592:
rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7572,11 +7548,16 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
spec->channels = rf_vals_5592_xtal20;
}
+ break;
}
if (WARN_ON_ONCE(!spec->channels))
return -ENODEV;
+ spec->supported_bands = SUPPORT_BAND_2GHZ;
+ if (spec->num_channels > 14)
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
/*
* Initialize HT information.
*/
@@ -7591,22 +7572,21 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+ if (rt2x00dev->default_ant.tx_chain_num >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
- spec->ht.cap |=
- rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
- IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
IEEE80211_HT_MCS_TX_DEFINED |
IEEE80211_HT_MCS_TX_RX_DIFF |
- ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ ((rt2x00dev->default_ant.tx_chain_num - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+ switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
@@ -7671,6 +7651,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3320:
case RF3052:
case RF3053:
+ case RF3070:
case RF3290:
case RF5360:
case RF5370:
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644
index 000000000000..ae152280e071
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -0,0 +1,873 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: rt2800 MMIO device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/*
+ * TX descriptor initialization
+ */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
+{
+ return (__le32 *) entry->skb->data;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
+
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *txd = entry_priv->desc;
+ u32 word;
+ const unsigned int txwi_size = entry->queue->winfo_size;
+
+ /*
+ * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+ * must contain a TXWI structure + 802.11 header + padding + 802.11
+ * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
+ * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
+ * data. It means that LAST_SEC0 is always 0.
+ */
+
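+ /*
+ * Worked illustration (purely an example, using the names set up
+ * below): with skb_dma == D and winfo_size == W the descriptor ends
+ * up with SD_PTR0 = D, SD_LEN0 = W (LAST_SEC0 = 0) and
+ * SD_PTR1 = D + W, SD_LEN1 = entry->skb->len.
+ */
+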
+ /*
+ * Initialize TX descriptor
+ */
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+ rt2x00_desc_write(txd, 0, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+ !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
+ rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(txd, 1, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+ skbdesc->skb_dma + txwi_size);
+ rt2x00_desc_write(txd, 2, word);
+
+ word = 0;
+ rt2x00_set_field32(&word, TXD_W3_WIV,
+ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+ rt2x00_desc_write(txd, 3, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
+
+/*
+ * RX control handlers
+ */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ __le32 *rxd = entry_priv->desc;
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
+ rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+ /*
+ * Unfortunately we don't know the cipher type used during
+ * decryption. This prevents us from providing correct
+ * statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
+
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
+ /*
+ * Hardware has stripped IV/EIV data from 802.11 frame during
+ * decryption. Unfortunately the descriptor doesn't contain
+ * any fields with the EIV/IV data either, so they can't
+ * be restored by rt2x00lib.
+ */
+ rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+ if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+ rxdesc->flags |= RX_FLAG_DECRYPTED;
+ else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+ rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+ }
+
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
+ rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
+ rxdesc->dev_flags |= RXDONE_L2PAD;
+
+ /*
+ * Process the RXWI structure that is at the start of the buffer.
+ */
+ rt2800_process_rxwi(entry, rxdesc);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
+static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, tx_wcid;
+
+ wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+ txwi = rt2800_drv_get_txwi(entry);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+ return (tx_wcid == wcid);
+}
+
+static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * rt2800pci hardware might reorder frames when exchanging traffic
+ * with multiple BA enabled STAs.
+ *
+ * For example, a tx queue
+ * [ STA1 | STA2 | STA1 | STA2 ]
+ * can result in tx status reports
+ * [ STA1 | STA1 | STA2 | STA2 ]
+ * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+ *
+ * To mitigate this effect, associate the tx status to the first frame
+ * in the tx queue with a matching wcid.
+ */
+ if (rt2800mmio_txdone_entry_check(entry, status) &&
+ !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * Find the first frame without a tx status and assign this status
+ * to it regardless of whether it matches.
+ */
+ if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
+ void *data)
+{
+ if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ rt2800_txdone_entry(entry, entry->status,
+ rt2800mmio_get_txwi(entry));
+ return false;
+ }
+
+ /* No more frames to release */
+ return true;
+}
+
+static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ u32 status;
+ u8 qid;
+ int max_tx_done = 16;
+
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
+ qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
+ if (unlikely(qid >= QID_RX)) {
+ /*
+ * Unknown queue, this shouldn't happen. Just drop
+ * this tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
+ qid);
+ break;
+ }
+
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+ if (unlikely(queue == NULL)) {
+ /*
+ * The queue is NULL, this shouldn't happen. Stop
+ * processing here and drop the tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ if (unlikely(rt2x00queue_empty(queue))) {
+ /*
+ * The queue is empty. Stop processing here
+ * and drop the tx status.
+ */
+ rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ /*
+ * Let's associate this tx status with the first
+ * matching frame.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_find_entry)) {
+ /*
+ * We cannot match the tx status to any frame, so just
+ * use the first one.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800mmio_txdone_match_first)) {
+ rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
+ qid);
+ break;
+ }
+ }
+
+ /*
+ * Release all frames with a valid tx status.
+ */
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, NULL,
+ rt2800mmio_txdone_release_entries);
+
+ if (--max_tx_done == 0)
+ break;
+ }
+
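+ /*
+ * Returning true here means the 16-report budget was exhausted
+ * while status reports may still be pending; the tx status
+ * tasklet uses this to reschedule itself.
+ */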
+ return !max_tx_done;
+}
+
+static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ u32 reg;
+
+ /*
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
+ */
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2800mmio_txdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+ /*
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
+ */
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
+
+void rt2800mmio_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
+
+void rt2800mmio_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+
+ rt2x00lib_beacondone(rt2x00dev);
+
+ if (rt2x00dev->intf_ap_count) {
+ /*
+ * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
+ * causing beacon skew and, as a result, problems with some
+ * powersaving clients over time. Shorten the beacon interval
+ * every 64 beacons by 64us to mitigate this effect.
+ */
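+ /*
+ * The interval register appears to count in 1/16 TU (64us) units,
+ * hence the "beacon_int * 16" below; dropping it by one for a
+ * single TBTT cancels the roughly 64us of drift accumulated over
+ * BCN_TBTT_OFFSET beacons.
+ */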
+ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16) - 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+ (rt2x00dev->beacon_int * 16));
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ }
+ drv_data->tbtt_tick++;
+ drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+ }
+
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
+
+void rt2800mmio_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ if (rt2x00mmio_rxdone(rt2x00dev))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+ else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
+
+void rt2800mmio_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800mmio_wakeup(rt2x00dev);
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev,
+ INT_MASK_CSR_AUTO_WAKEUP);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
+
+static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ u32 status;
+ int i;
+
+ /*
+ * The TX_FIFO_STATUS interrupt needs special care. We should
+ * read TX_STA_FIFO but we should do it immediately as otherwise
+ * the register can overflow and we would lose status reports.
+ *
+ * Hence, read the TX_STA_FIFO register and copy all tx status
+ * reports into a kernel FIFO which is handled in the txstatus
+ * tasklet. We use a tasklet to process the tx status reports
+ * because we can schedule the tasklet multiple times (when the
+ * interrupt fires again during tx status processing).
+ *
+ * Furthermore we don't disable the TX_FIFO_STATUS
+ * interrupt here but leave it enabled so that the TX_STA_FIFO
+ * can also be read while the tx status tasklet gets executed.
+ *
+ * Since we have only one producer and one consumer we don't
+ * need to lock the kfifo.
+ */
+ for (i = 0; i < rt2x00dev->tx->limit; i++) {
+ rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+
+ if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
+ break;
+
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
+ rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
+ break;
+ }
+ }
+
+ /* Schedule the tasklet for processing the tx status. */
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+}
+
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
+{
+ struct rt2x00_dev *rt2x00dev = dev_instance;
+ u32 reg, mask;
+
+ /* Read status and ACK all interrupts */
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ if (!reg)
+ return IRQ_NONE;
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return IRQ_HANDLED;
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = ~reg;
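+ /*
+ * For example, if only RX_DONE was asserted, mask now has every
+ * bit set except RX_DONE, so the INT_MASK_CSR update at the end
+ * of this handler disables exactly that interrupt until the
+ * rxdone tasklet re-enables it.
+ */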
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800mmio_txstatus_interrupt(rt2x00dev);
+ /*
+ * Never disable the TX_FIFO_STATUS interrupt.
+ */
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+ rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
+
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ u32 reg;
+ unsigned long flags;
+
+ /*
+ * When interrupts are being enabled, the interrupt source
+ * register should be cleared to ensure a clean state.
+ */
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+ }
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
+ rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Wait for possibly running tasklets to finish.
+ */
+ tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
+
+/*
+ * Queue handlers.
+ */
+void rt2800mmio_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+
+void rt2800mmio_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct queue_entry *entry;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
+ entry->entry_idx);
+ break;
+ case QID_MGMT:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
+ entry->entry_idx);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+
+void rt2800mmio_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for current invocation to finish. The tasklet
+ * won't be scheduled anymore afterwards since we disabled
+ * the TBTT and PRE TBTT timer.
+ */
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
+
+void rt2800mmio_queue_init(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
+
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
+ switch (queue->qid) {
+ case QID_RX:
+ queue->limit = 128;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = RXD_DESC_SIZE;
+ queue->winfo_size = rxwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ queue->limit = 64;
+ queue->data_size = AGGREGATION_SIZE;
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_BEACON:
+ queue->limit = 8;
+ queue->data_size = 0; /* No DMA required for beacons */
+ queue->desc_size = TXD_DESC_SIZE;
+ queue->winfo_size = txwi_size;
+ queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+ break;
+
+ case QID_ATIM:
+ /* fallthrough */
+ default:
+ BUG();
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
+
+/*
+ * Initialization functions.
+ */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+ return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
+
+void rt2800mmio_clear_entry(struct queue_entry *entry)
+{
+ struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 word;
+
+ if (entry->queue->qid == QID_RX) {
+ rt2x00_desc_read(entry_priv->desc, 0, &word);
+ rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+ rt2x00_desc_write(entry_priv->desc, 0, word);
+
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Set RX IDX in register to inform hardware that we have
+ * handled this entry and it is available for reuse again.
+ */
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ entry->entry_idx);
+ } else {
+ rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+ rt2x00_desc_write(entry_priv->desc, 1, word);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
+
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct queue_entry_priv_mmio *entry_priv;
+
+ /*
+ * Initialize registers.
+ */
+ entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
+ rt2x00dev->tx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+ entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
+ rt2x00dev->tx[1].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+ entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
+ rt2x00dev->tx[2].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+ entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
+ rt2x00dev->tx[3].limit);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
+ entry_priv = rt2x00dev->rx->entries[0].priv_data;
+ rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
+ entry_priv->desc_dma);
+ rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
+ rt2x00dev->rx[0].limit);
+ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+ rt2x00dev->rx[0].limit - 1);
+ rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+ rt2800_disable_wpdma(rt2x00dev);
+
+ rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
+
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ /*
+ * Reset DMA indexes
+ */
+ rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+ if (rt2x00_is_pcie(rt2x00dev) &&
+ (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))) {
+ rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
+
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
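+ /* Assert the MAC software reset (CSR and BBP), then release it. */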
+ reg = 0;
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
+
+/*
+ * Device state switch handlers.
+ */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ /* Wait for DMA, ignore error until we initialize queues. */
+ rt2800_wait_wpdma_ready(rt2x00dev);
+
+ if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
+ return -EIO;
+
+ return rt2800_enable_radio(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2800 MMIO library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644
index 000000000000..6a10de3eee3e
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -0,0 +1,165 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800mmio
+ * Abstract: forward declarations for the rt2800mmio module.
+ */
+
+#ifndef RT2800MMIO_H
+#define RT2800MMIO_H
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET 0x10
+#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
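+
+/*
+ * For example, TX_CTX_IDX(2) expands to TX_CTX_IDX0 + 0x20, i.e. the
+ * CTX index register of the third TX ring.
+ */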
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE (4 * sizeof(__le32))
+#define RXD_DESC_SIZE (4 * sizeof(__le32))
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
+#define TXD_W1_BURST FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ * 0: MGMT, 1: HCCA, 2: EDCA
+ */
+#define TXD_W3_WIV FIELD32(0x01000000)
+#define TXD_W3_QSEL FIELD32(0x06000000)
+#define TXD_W3_TCO FIELD32(0x20000000)
+#define TXD_W3_UCO FIELD32(0x40000000)
+#define TXD_W3_ICO FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1 FIELD32(0x00003fff)
+#define RXD_W1_SDL0 FIELD32(0x3fff0000)
+#define RXD_W1_LS0 FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame is being decrypted.
+ */
+#define RXD_W3_BA FIELD32(0x00000001)
+#define RXD_W3_DATA FIELD32(0x00000002)
+#define RXD_W3_NULLDATA FIELD32(0x00000004)
+#define RXD_W3_FRAG FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W3_MULTICAST FIELD32(0x00000020)
+#define RXD_W3_BROADCAST FIELD32(0x00000040)
+#define RXD_W3_MY_BSS FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W3_AMSDU FIELD32(0x00000800)
+#define RXD_W3_HTC FIELD32(0x00001000)
+#define RXD_W3_RSSI FIELD32(0x00002000)
+#define RXD_W3_L2PAD FIELD32(0x00004000)
+#define RXD_W3_AMPDU FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
+
+/* TX descriptor initialization */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+
+/* RX control handlers */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+ struct rxdone_entry_desc *rxdesc);
+
+/* Interrupt functions */
+void rt2800mmio_txstatus_tasklet(unsigned long data);
+void rt2800mmio_pretbtt_tasklet(unsigned long data);
+void rt2800mmio_tbtt_tasklet(unsigned long data);
+void rt2800mmio_rxdone_tasklet(unsigned long data);
+void rt2800mmio_autowake_tasklet(unsigned long data);
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state);
+
+/* Queue handlers */
+void rt2800mmio_start_queue(struct data_queue *queue);
+void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_stop_queue(struct data_queue *queue);
+void rt2800mmio_queue_init(struct data_queue *queue);
+
+/* Initialization functions */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry);
+void rt2800mmio_clear_entry(struct queue_entry *entry);
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
+
+/* Device state switch handlers. */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2800MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index f8f2abbfbb65..b504455b4fec 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -37,14 +37,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/eeprom_93cx6.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2x00pci.h"
-#include "rt2x00soc.h"
#include "rt2800lib.h"
+#include "rt2800mmio.h"
#include "rt2800.h"
#include "rt2800pci.h"
@@ -90,27 +89,6 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
-
- if (!base_addr)
- return -ENOMEM;
-
- memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
-
- iounmap(base_addr);
- return 0;
-}
-#else
-static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
- return -ENOMEM;
-}
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
{
struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@ static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
return rt2800_read_eeprom_efuse(rt2x00dev);
}
-#else
-static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
-{
- return 0;
-}
-
-static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
-{
- return -EOPNOTSUPP;
-}
-#endif /* CONFIG_PCI */
-
-/*
- * Queue handlers.
- */
-static void rt2800pci_start_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_kick_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- struct queue_entry *entry;
-
- switch (queue->qid) {
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
- entry->entry_idx);
- break;
- case QID_MGMT:
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
- entry->entry_idx);
- break;
- default:
- break;
- }
-}
-
-static void rt2800pci_stop_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- switch (queue->qid) {
- case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- break;
- case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
- rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
- /*
- * Wait for current invocation to finish. The tasklet
- * won't be scheduled anymore afterwards since we disabled
- * the TBTT and PRE TBTT timer.
- */
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-
- break;
- default:
- break;
- }
-}
/*
* Firmware functions
@@ -332,217 +204,13 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
}
/*
- * Initialization functions.
- */
-static bool rt2800pci_get_entry_state(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
-
- return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
- }
-}
-
-static void rt2800pci_clear_entry(struct queue_entry *entry)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 word;
-
- if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
- rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 0, word);
-
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(entry_priv->desc, 1, word);
-
- /*
- * Set RX IDX in register to inform hardware that we have
- * handled this entry and it is available for reuse again.
- */
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- entry->entry_idx);
- } else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
- rt2x00_desc_write(entry_priv->desc, 1, word);
- }
-}
-
-static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct queue_entry_priv_mmio *entry_priv;
-
- /*
- * Initialize registers.
- */
- entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
- rt2x00dev->tx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
-
- entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
- rt2x00dev->tx[1].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
-
- entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
- rt2x00dev->tx[2].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
-
- entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
- rt2x00dev->tx[3].limit);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
-
- rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
-
- entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
- entry_priv->desc_dma);
- rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
- rt2x00dev->rx[0].limit);
- rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
- rt2x00dev->rx[0].limit - 1);
- rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
-
- rt2800_disable_wpdma(rt2x00dev);
-
- rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
-
- return 0;
-}
-
-/*
* Device state switch handlers.
*/
-static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
- unsigned long flags;
-
- /*
- * When interrupts are being enabled, the interrupt registers
- * should clear the register to assure a clean state.
- */
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
- }
-
- spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- reg = 0;
- if (state == STATE_RADIO_IRQ_ON) {
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
- }
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
-
- if (state == STATE_RADIO_IRQ_OFF) {
- /*
- * Wait for possibly running tasklets to finish.
- */
- tasklet_kill(&rt2x00dev->txstatus_tasklet);
- tasklet_kill(&rt2x00dev->rxdone_tasklet);
- tasklet_kill(&rt2x00dev->autowake_tasklet);
- tasklet_kill(&rt2x00dev->tbtt_tasklet);
- tasklet_kill(&rt2x00dev->pretbtt_tasklet);
- }
-}
-
-static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
-
- /*
- * Reset DMA indexes
- */
- rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
- if (rt2x00_is_pcie(rt2x00dev) &&
- (rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390) ||
- rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT5592))) {
- rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
- rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
- rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
- rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
- }
-
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
- reg = 0;
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
- return 0;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
int retval;
- /* Wait for DMA, ignore error until we initialize queues. */
- rt2800_wait_wpdma_ready(rt2x00dev);
-
- if (unlikely(rt2800pci_init_queues(rt2x00dev)))
- return -EIO;
-
- retval = rt2800_enable_radio(rt2x00dev);
+ retval = rt2800mmio_enable_radio(rt2x00dev);
if (retval)
return retval;
@@ -559,15 +227,6 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
return retval;
}
-static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
-{
- if (rt2x00_is_soc(rt2x00dev)) {
- rt2800_disable_radio(rt2x00dev);
- rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
- }
-}
-
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
@@ -601,12 +260,11 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
* After the radio has been disabled, the device should
* be put to sleep for powersaving.
*/
- rt2800pci_disable_radio(rt2x00dev);
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
case STATE_RADIO_IRQ_OFF:
- rt2800pci_toggle_irq(rt2x00dev, state);
+ rt2800mmio_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
case STATE_SLEEP:
@@ -627,479 +285,13 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
}
/*
- * TX descriptor initialization
- */
-static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
-{
- return (__le32 *) entry->skb->data;
-}
-
-static void rt2800pci_write_tx_desc(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *txd = entry_priv->desc;
- u32 word;
- const unsigned int txwi_size = entry->queue->winfo_size;
-
- /*
- * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
- * must contains a TXWI structure + 802.11 header + padding + 802.11
- * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
- * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
- * data. It means that LAST_SEC0 is always 0.
- */
-
- /*
- * Initialize TX descriptor
- */
- word = 0;
- rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
- rt2x00_desc_write(txd, 0, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
- !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_BURST,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
- rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
- rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
- rt2x00_desc_write(txd, 1, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + txwi_size);
- rt2x00_desc_write(txd, 2, word);
-
- word = 0;
- rt2x00_set_field32(&word, TXD_W3_WIV,
- !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
- rt2x00_desc_write(txd, 3, word);
-
- /*
- * Register descriptor details in skb frame descriptor.
- */
- skbdesc->desc = txd;
- skbdesc->desc_len = TXD_DESC_SIZE;
-}
-
-/*
- * RX control handlers
- */
-static void rt2800pci_fill_rxdone(struct queue_entry *entry,
- struct rxdone_entry_desc *rxdesc)
-{
- struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
- __le32 *rxd = entry_priv->desc;
- u32 word;
-
- rt2x00_desc_read(rxd, 3, &word);
-
- if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
- rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
-
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from correct providing
- * correct statistics through debugfs.
- */
- rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
-
- if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
- /*
- * Hardware has stripped IV/EIV data from 802.11 frame during
- * decryption. Unfortunately the descriptor doesn't contain
- * any fields with the EIV/IV data either, so they can't
- * be restored by rt2x00lib.
- */
- rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-
- /*
- * The hardware has already checked the Michael Mic and has
- * stripped it from the frame. Signal this to mac80211.
- */
- rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
- if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
- rxdesc->flags |= RX_FLAG_DECRYPTED;
- else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
- rxdesc->flags |= RX_FLAG_MMIC_ERROR;
- }
-
- if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
- rxdesc->dev_flags |= RXDONE_MY_BSS;
-
- if (rt2x00_get_field32(word, RXD_W3_L2PAD))
- rxdesc->dev_flags |= RXDONE_L2PAD;
-
- /*
- * Process the RXWI structure that is at the start of the buffer.
- */
- rt2800_process_rxwi(entry, rxdesc);
-}
-
-/*
- * Interrupt functions.
- */
-static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
-{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
-
- rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
-}
-
-static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
- __le32 *txwi;
- u32 word;
- int wcid, tx_wcid;
-
- wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
- txwi = rt2800_drv_get_txwi(entry);
- rt2x00_desc_read(txwi, 1, &word);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
- return (tx_wcid == wcid);
-}
-
-static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * rt2800pci hardware might reorder frames when exchanging traffic
- * with multiple BA enabled STAs.
- *
- * For example, a tx queue
- * [ STA1 | STA2 | STA1 | STA2 ]
- * can result in tx status reports
- * [ STA1 | STA1 | STA2 | STA2 ]
- * when the hw decides to aggregate the frames for STA1 into one AMPDU.
- *
- * To mitigate this effect, associate the tx status to the first frame
- * in the tx queue with a matching wcid.
- */
- if (rt2800pci_txdone_entry_check(entry, status) &&
- !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-
-static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * Find the first frame without tx status and assign this status to it
- * regardless if it matches or not.
- */
- if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
- void *data)
-{
- if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- rt2800_txdone_entry(entry, entry->status,
- rt2800pci_get_txwi(entry));
- return false;
- }
-
- /* No more frames to release */
- return true;
-}
-
-static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- u32 status;
- u8 qid;
- int max_tx_done = 16;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
- qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
- if (unlikely(qid >= QID_RX)) {
- /*
- * Unknown queue, this shouldn't happen. Just drop
- * this tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
- qid);
- break;
- }
-
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(queue == NULL)) {
- /*
- * The queue is NULL, this shouldn't happen. Stop
- * processing here and drop the tx status
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
- qid);
- break;
- }
-
- if (unlikely(rt2x00queue_empty(queue))) {
- /*
- * The queue is empty. Stop processing here
- * and drop the tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
- break;
- }
-
- /*
- * Let's associate this tx status with the first
- * matching frame.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_find_entry)) {
- /*
- * We cannot match the tx status to any frame, so just
- * use the first one.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800pci_txdone_match_first)) {
- rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
- qid);
- break;
- }
- }
-
- /*
- * Release all frames with a valid tx status.
- */
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, NULL,
- rt2800pci_txdone_release_entries);
-
- if (--max_tx_done == 0)
- break;
- }
-
- return !max_tx_done;
-}
-
-static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_field32 irq_field)
-{
- u32 reg;
-
- /*
- * Enable a single interrupt. The interrupt mask register
- * access needs locking.
- */
- spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, irq_field, 1);
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock_irq(&rt2x00dev->irqmask_lock);
-}
-
-static void rt2800pci_txstatus_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2800pci_txdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
- /*
- * No need to enable the tx status interrupt here as we always
- * leave it enabled to minimize the possibility of a tx status
- * register overflow. See comment in interrupt handler.
- */
-}
-
-static void rt2800pci_pretbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2x00lib_pretbtt(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
-}
-
-static void rt2800pci_tbtt_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
- u32 reg;
-
- rt2x00lib_beacondone(rt2x00dev);
-
- if (rt2x00dev->intf_ap_count) {
- /*
- * The rt2800pci hardware tbtt timer is off by 1us per tbtt
- * causing beacon skew and as a result causing problems with
- * some powersaving clients over time. Shorten the beacon
- * interval every 64 beacons by 64us to mitigate this effect.
- */
- if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16) - 1);
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
- (rt2x00dev->beacon_int * 16));
- rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- drv_data->tbtt_tick++;
- drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
- }
-
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
-}
-
-static void rt2800pci_rxdone_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2x00mmio_rxdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
-}
-
-static void rt2800pci_autowake_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- rt2800pci_wakeup(rt2x00dev);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
-}
-
-static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
-{
- u32 status;
- int i;
-
- /*
- * The TX_FIFO_STATUS interrupt needs special care. We should
- * read TX_STA_FIFO but we should do it immediately as otherwise
- * the register can overflow and we would lose status reports.
- *
- * Hence, read the TX_STA_FIFO register and copy all tx status
- * reports into a kernel FIFO which is handled in the txstatus
- * tasklet. We use a tasklet to process the tx status reports
- * because we can schedule the tasklet multiple times (when the
- * interrupt fires again during tx status processing).
- *
- * Furthermore we don't disable the TX_FIFO_STATUS
- * interrupt here but leave it enabled so that the TX_STA_FIFO
- * can also be read while the tx status tasklet gets executed.
- *
- * Since we have only one producer and one consumer we don't
- * need to lock the kfifo.
- */
- for (i = 0; i < rt2x00dev->tx->limit; i++) {
- rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
-
- if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
- break;
-
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
- rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
- break;
- }
- }
-
- /* Schedule the tasklet for processing the tx status. */
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-}
-
-static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg, mask;
-
- /* Read status and ACK all interrupts */
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
- rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
- if (!reg)
- return IRQ_NONE;
-
- if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- return IRQ_HANDLED;
-
- /*
- * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
- * for interrupts and interrupt masks we can just use the value of
- * INT_SOURCE_CSR to create the interrupt mask.
- */
- mask = ~reg;
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
- rt2800pci_txstatus_interrupt(rt2x00dev);
- /*
- * Never disable the TX_FIFO_STATUS interrupt.
- */
- rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- }
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- tasklet_schedule(&rt2x00dev->autowake_tasklet);
-
- /*
- * Disable all interrupts for which a tasklet was scheduled right now,
- * the tasklet will reenable the appropriate interrupts.
- */
- spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- reg &= mask;
- rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- spin_unlock(&rt2x00dev->irqmask_lock);
-
- return IRQ_HANDLED;
-}
-
-/*
* Device probe functions.
*/
static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (rt2x00_is_soc(rt2x00dev))
- retval = rt2800pci_read_eeprom_soc(rt2x00dev);
- else if (rt2800pci_efuse_detect(rt2x00dev))
+ if (rt2800pci_efuse_detect(rt2x00dev))
retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
else
retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.read_eeprom = rt2800pci_read_eeprom,
.hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
.drv_write_firmware = rt2800pci_write_firmware,
- .drv_init_registers = rt2800pci_init_registers,
- .drv_get_txwi = rt2800pci_get_txwi,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
};
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
- .irq_handler = rt2800pci_interrupt,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
- .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
- .tbtt_tasklet = rt2800pci_tbtt_tasklet,
- .rxdone_tasklet = rt2800pci_rxdone_tasklet,
- .autowake_tasklet = rt2800pci_autowake_tasklet,
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
.probe_hw = rt2800_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
.initialize = rt2x00mmio_initialize,
.uninitialize = rt2x00mmio_uninitialize,
- .get_entry_state = rt2800pci_get_entry_state,
- .clear_entry = rt2800pci_clear_entry,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
.set_device_state = rt2800pci_set_device_state,
.rfkill_poll = rt2800_rfkill_poll,
.link_stats = rt2800_link_stats,
@@ -1171,15 +363,15 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
- .start_queue = rt2800pci_start_queue,
- .kick_queue = rt2800pci_kick_queue,
- .stop_queue = rt2800pci_stop_queue,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
.flush_queue = rt2x00mmio_flush_queue,
- .write_tx_desc = rt2800pci_write_tx_desc,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
- .fill_rxdone = rt2800pci_fill_rxdone,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
.config_filter = rt2800_config_filter,
@@ -1191,49 +383,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.sta_remove = rt2800_sta_remove,
};
-static void rt2800pci_queue_init(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- unsigned short txwi_size, rxwi_size;
-
- rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
-
- switch (queue->qid) {
- case QID_RX:
- queue->limit = 128;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = RXD_DESC_SIZE;
- queue->winfo_size = rxwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_AC_VO:
- case QID_AC_VI:
- case QID_AC_BE:
- case QID_AC_BK:
- queue->limit = 64;
- queue->data_size = AGGREGATION_SIZE;
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_BEACON:
- queue->limit = 8;
- queue->data_size = 0; /* No DMA required for beacons */
- queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = txwi_size;
- queue->priv_size = sizeof(struct queue_entry_priv_mmio);
- break;
-
- case QID_ATIM:
- /* fallthrough */
- default:
- BUG();
- break;
- }
-}
-
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
- .queue_init = rt2800pci_queue_init,
+ .queue_init = rt2800mmio_queue_init,
.lib = &rt2800pci_rt2x00_ops,
.drv = &rt2800pci_rt2800_ops,
.hw = &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-#ifdef CONFIG_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601) },
{ PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
#endif
{ 0, }
};
-#endif /* CONFIG_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800soc_probe(struct platform_device *pdev)
-{
- return rt2x00soc_probe(pdev, &rt2800pci_ops);
-}
-
-static struct platform_driver rt2800soc_driver = {
- .driver = {
- .name = "rt2800_wmac",
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
- .probe = rt2800soc_probe,
- .remove = rt2x00soc_remove,
- .suspend = rt2x00soc_suspend,
- .resume = rt2x00soc_resume,
-};
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
static int rt2800pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -1344,39 +469,5 @@ static struct pci_driver rt2800pci_driver = {
.suspend = rt2x00pci_suspend,
.resume = rt2x00pci_resume,
};
-#endif /* CONFIG_PCI */
-
-static int __init rt2800pci_init(void)
-{
- int ret = 0;
-
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- ret = platform_driver_register(&rt2800soc_driver);
- if (ret)
- return ret;
-#endif
-#ifdef CONFIG_PCI
- ret = pci_register_driver(&rt2800pci_driver);
- if (ret) {
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
- return ret;
- }
-#endif
-
- return ret;
-}
-
-static void __exit rt2800pci_exit(void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver(&rt2800pci_driver);
-#endif
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
- platform_driver_unregister(&rt2800soc_driver);
-#endif
-}
-module_init(rt2800pci_init);
-module_exit(rt2800pci_exit);
+module_pci_driver(rt2800pci_driver);
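
Note: the removed rt2800pci_init()/rt2800pci_exit() pair is replaced by module_pci_driver(), which generates the same register/unregister boilerplate now that the SoC half lives in its own module (rt2800soc.c below). A minimal sketch of what the macro expands to, via the generic module_driver() helper (illustrative, not the literal preprocessor output):

static int __init rt2800pci_driver_init(void)
{
	return pci_register_driver(&rt2800pci_driver);
}
module_init(rt2800pci_driver_init);

static void __exit rt2800pci_driver_exit(void)
{
	pci_unregister_driver(&rt2800pci_driver);
}
module_exit(rt2800pci_driver_exit);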
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index ab22a087c50d..a81c9ee281c0 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,107 +35,10 @@
#define RT2800PCI_H
/*
- * Queue register offset macros
- */
-#define TX_QUEUE_REG_OFFSET 0x10
-#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-
-/*
* 8051 firmware image.
*/
#define FIRMWARE_RT2860 "rt2860.bin"
#define FIRMWARE_RT3290 "rt3290.bin"
#define FIRMWARE_IMAGE_BASE 0x2000
-/*
- * DMA descriptor defines.
- */
-#define TXD_DESC_SIZE (4 * sizeof(__le32))
-#define RXD_DESC_SIZE (4 * sizeof(__le32))
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
-#define TXD_W1_BURST FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- * 0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV FIELD32(0x01000000)
-#define TXD_W3_QSEL FIELD32(0x06000000)
-#define TXD_W3_TCO FIELD32(0x20000000)
-#define TXD_W3_UCO FIELD32(0x40000000)
-#define TXD_W3_ICO FIELD32(0x80000000)
-
-/*
- * RX descriptor format for RX Ring.
- */
-
-/*
- * Word0
- */
-#define RXD_W0_SDP0 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define RXD_W1_SDL1 FIELD32(0x00003fff)
-#define RXD_W1_SDL0 FIELD32(0x3fff0000)
-#define RXD_W1_LS0 FIELD32(0x40000000)
-#define RXD_W1_DMA_DONE FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define RXD_W2_SDP1 FIELD32(0xffffffff)
-
-/*
- * Word3
- * AMSDU: RX with 802.3 header, not 802.11 header.
- * DECRYPTED: This frame is being decrypted.
- */
-#define RXD_W3_BA FIELD32(0x00000001)
-#define RXD_W3_DATA FIELD32(0x00000002)
-#define RXD_W3_NULLDATA FIELD32(0x00000004)
-#define RXD_W3_FRAG FIELD32(0x00000008)
-#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXD_W3_MULTICAST FIELD32(0x00000020)
-#define RXD_W3_BROADCAST FIELD32(0x00000040)
-#define RXD_W3_MY_BSS FIELD32(0x00000080)
-#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
-#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
-#define RXD_W3_AMSDU FIELD32(0x00000800)
-#define RXD_W3_HTC FIELD32(0x00001000)
-#define RXD_W3_RSSI FIELD32(0x00002000)
-#define RXD_W3_L2PAD FIELD32(0x00004000)
-#define RXD_W3_AMPDU FIELD32(0x00008000)
-#define RXD_W3_DECRYPTED FIELD32(0x00010000)
-#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
-#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
-
#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644
index 000000000000..1359227ca411
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -0,0 +1,263 @@
+/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Module: rt2800soc
+ * Abstract: rt2800 WiSoC specific routines.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2x00soc.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/* Allow hardware encryption to be disabled. */
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ return modparam_nohwcrypt;
+}
+
+static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_disable_radio(rt2x00dev);
+ rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+}
+
+static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+{
+ int retval = 0;
+
+ switch (state) {
+ case STATE_RADIO_ON:
+ retval = rt2800mmio_enable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_OFF:
+ rt2800soc_disable_radio(rt2x00dev);
+ break;
+
+ case STATE_RADIO_IRQ_ON:
+ case STATE_RADIO_IRQ_OFF:
+ rt2800mmio_toggle_irq(rt2x00dev, state);
+ break;
+
+ case STATE_DEEP_SLEEP:
+ case STATE_SLEEP:
+ case STATE_STANDBY:
+ case STATE_AWAKE:
+ /* These states are not supported, but don't report an error */
+ retval = 0;
+ break;
+
+ default:
+ retval = -ENOTSUPP;
+ break;
+ }
+
+ if (unlikely(retval))
+ rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
+ state, retval);
+
+ return retval;
+}
+
+static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+
+ if (!base_addr)
+ return -ENOMEM;
+
+ memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+ iounmap(base_addr);
+ return 0;
+}
+
+/* Firmware functions */
+static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
+ const u8 *data, const size_t len)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .tx = rt2x00mac_tx,
+ .start = rt2x00mac_start,
+ .stop = rt2x00mac_stop,
+ .add_interface = rt2x00mac_add_interface,
+ .remove_interface = rt2x00mac_remove_interface,
+ .config = rt2x00mac_config,
+ .configure_filter = rt2x00mac_configure_filter,
+ .set_key = rt2x00mac_set_key,
+ .sw_scan_start = rt2x00mac_sw_scan_start,
+ .sw_scan_complete = rt2x00mac_sw_scan_complete,
+ .get_stats = rt2x00mac_get_stats,
+ .get_tkip_seq = rt2800_get_tkip_seq,
+ .set_rts_threshold = rt2800_set_rts_threshold,
+ .sta_add = rt2x00mac_sta_add,
+ .sta_remove = rt2x00mac_sta_remove,
+ .bss_info_changed = rt2x00mac_bss_info_changed,
+ .conf_tx = rt2800_conf_tx,
+ .get_tsf = rt2800_get_tsf,
+ .rfkill_poll = rt2x00mac_rfkill_poll,
+ .ampdu_action = rt2800_ampdu_action,
+ .flush = rt2x00mac_flush,
+ .get_survey = rt2800_get_survey,
+ .get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
+};
+
+static const struct rt2800_ops rt2800soc_rt2800_ops = {
+ .register_read = rt2x00mmio_register_read,
+ .register_read_lock = rt2x00mmio_register_read, /* same for SoCs */
+ .register_write = rt2x00mmio_register_write,
+ .register_write_lock = rt2x00mmio_register_write, /* same for SoCs */
+ .register_multiread = rt2x00mmio_register_multiread,
+ .register_multiwrite = rt2x00mmio_register_multiwrite,
+ .regbusy_read = rt2x00mmio_regbusy_read,
+ .read_eeprom = rt2800soc_read_eeprom,
+ .hwcrypt_disabled = rt2800soc_hwcrypt_disabled,
+ .drv_write_firmware = rt2800soc_write_firmware,
+ .drv_init_registers = rt2800mmio_init_registers,
+ .drv_get_txwi = rt2800mmio_get_txwi,
+};
+
+static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
+ .irq_handler = rt2800mmio_interrupt,
+ .txstatus_tasklet = rt2800mmio_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800mmio_tbtt_tasklet,
+ .rxdone_tasklet = rt2800mmio_rxdone_tasklet,
+ .autowake_tasklet = rt2800mmio_autowake_tasklet,
+ .probe_hw = rt2800_probe_hw,
+ .get_firmware_name = rt2800soc_get_firmware_name,
+ .check_firmware = rt2800soc_check_firmware,
+ .load_firmware = rt2800soc_load_firmware,
+ .initialize = rt2x00mmio_initialize,
+ .uninitialize = rt2x00mmio_uninitialize,
+ .get_entry_state = rt2800mmio_get_entry_state,
+ .clear_entry = rt2800mmio_clear_entry,
+ .set_device_state = rt2800soc_set_device_state,
+ .rfkill_poll = rt2800_rfkill_poll,
+ .link_stats = rt2800_link_stats,
+ .reset_tuner = rt2800_reset_tuner,
+ .link_tuner = rt2800_link_tuner,
+ .gain_calibration = rt2800_gain_calibration,
+ .vco_calibration = rt2800_vco_calibration,
+ .start_queue = rt2800mmio_start_queue,
+ .kick_queue = rt2800mmio_kick_queue,
+ .stop_queue = rt2800mmio_stop_queue,
+ .flush_queue = rt2x00mmio_flush_queue,
+ .write_tx_desc = rt2800mmio_write_tx_desc,
+ .write_tx_data = rt2800_write_tx_data,
+ .write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
+ .fill_rxdone = rt2800mmio_fill_rxdone,
+ .config_shared_key = rt2800_config_shared_key,
+ .config_pairwise_key = rt2800_config_pairwise_key,
+ .config_filter = rt2800_config_filter,
+ .config_intf = rt2800_config_intf,
+ .config_erp = rt2800_config_erp,
+ .config_ant = rt2800_config_ant,
+ .config = rt2800_config,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
+};
+
+static const struct rt2x00_ops rt2800soc_ops = {
+ .name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
+ .max_ap_intf = 8,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .queue_init = rt2800mmio_queue_init,
+ .lib = &rt2800soc_rt2x00_ops,
+ .drv = &rt2800soc_rt2800_ops,
+ .hw = &rt2800soc_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+ return rt2x00soc_probe(pdev, &rt2800soc_ops);
+}
+
+static struct platform_driver rt2800soc_driver = {
+ .driver = {
+ .name = "rt2800_wmac",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+ .probe = rt2800soc_probe,
+ .remove = rt2x00soc_remove,
+ .suspend = rt2x00soc_suspend,
+ .resume = rt2x00soc_resume,
+};
+
+module_platform_driver(rt2800soc_driver);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 96961b9a395c..997df03a0c2e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
return false;
}
+#define TXSTATUS_READ_INTERVAL 1000000
+
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
int urb_status, u32 tx_status)
{
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
if (rt2800usb_txstatus_pending(rt2x00dev)) {
- /* Read register after 250 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+ /* Read register after 1 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
return false;
}
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return;
- /* Read TX_STA_FIFO register after 500 us */
- hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+ /* Read TX_STA_FIFO register after 2 ms */
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
HRTIMER_MODE_REL);
}
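
Note: TXSTATUS_READ_INTERVAL is 1,000,000 ns, so the two hunks above stretch the TX status polling delays from 250/500 us to 1/2 ms. A minimal sketch of the resulting timer arms, using only helpers already shown above (the local variable names are illustrative):

	ktime_t first_read = ktime_set(0, 2 * TXSTATUS_READ_INTERVAL);	/* 2,000,000 ns = 2 ms */
	ktime_t re_read    = ktime_set(0, TXSTATUS_READ_INTERVAL);	/* 1,000,000 ns = 1 ms */

	/* initial read after 2 ms ... */
	hrtimer_start(&rt2x00dev->txstatus_timer, first_read, HRTIMER_MODE_REL);
	/* ... and while status is still pending, re-arm with the shorter delay */
	hrtimer_start(&rt2x00dev->txstatus_timer, re_read, HRTIMER_MODE_REL);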
@@ -1176,6 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x002f) },
{ USB_DEVICE(0x1737, 0x0079) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0170) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
@@ -1199,6 +1205,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x1103) },
/* Cameo */
{ USB_DEVICE(0x148f, 0xf301) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1f) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7733) },
/* Hawking */
@@ -1212,6 +1220,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x016b) },
/* NETGEAR */
{ USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
{ USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1229,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0067) },
{ USB_DEVICE(0x0df6, 0x006a) },
+ { USB_DEVICE(0x0df6, 0x006e) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3421) },
#endif
@@ -1236,6 +1246,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
{ USB_DEVICE(0x2001, 0x3c1e) },
+ { USB_DEVICE(0x2001, 0x3c20) },
+ { USB_DEVICE(0x2001, 0x3c22) },
+ { USB_DEVICE(0x2001, 0x3c23) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
{ USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1271,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
{ USB_DEVICE(0x057c, 0x8501) },
- /* D-Link DWA-160-B2 */
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x0241) },
+ /* D-Link */
{ USB_DEVICE(0x2001, 0x3c1a) },
+ { USB_DEVICE(0x2001, 0x3c21) },
/* Proware */
{ USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5572) },
+ /* TRENDnet */
+ { USB_DEVICE(0x20f4, 0x724a) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@@ -1333,6 +1351,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24) },
+ { USB_DEVICE(0x2019, 0xab29) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259) },
/* RadioShack */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index fe4c572db52c..e4ba2ce0f212 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
+#include <linux/average.h>
#include <net/mac80211.h>
@@ -138,17 +139,6 @@
#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
- int avg;
- int avg_weight;
-};
-
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
* Similar to the avg_rssi in the link_qual structure
* this value is updated by using the walking average.
*/
- struct avg_val rssi_ant;
+ struct ewma rssi_ant;
};
/*
@@ -326,7 +316,7 @@ struct link {
/*
* Currently active average RSSI value
*/
- struct avg_val avg_rssi;
+ struct ewma avg_rssi;
/*
* Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_capability_flags cap_flag)
+{
+ return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1ca4c7ffc189..3db0d99d9da7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
return;
__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct ieee80211_key_conf *key = tx_info->control.hw_key;
unsigned int overhead = 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
return overhead;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fe7a7f63a9ed..7f7baae5ae02 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf, &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
intf->crypto_stats_entry =
debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
intf, &rt2x00debug_fop_crypto_stats);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 712eea9d398f..080b1fcae5fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00queue_start_queues(rt2x00dev);
rt2x00link_start_tuner(rt2x00dev);
rt2x00link_start_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_start_vcocal(rt2x00dev);
/*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
* Stop all queues
*/
rt2x00link_stop_agc(rt2x00dev);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
rt2x00link_stop_vcocal(rt2x00dev);
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
* here as they will fetch the next beacon directly prior to
* transmission.
*/
- if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
return;
/* fetch next beacon */
@@ -358,7 +358,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
rt2x00crypto_tx_insert_iv(entry->skb, header_length);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 8368aab86f28..c2b3b6629188 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -35,50 +35,28 @@
*/
#define DEFAULT_RSSI -128
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- * avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values this results
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates this and will move towards
- * the new values correctly allowing a effective link tuning,
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES 8
-#define AVG_FACTOR 1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
- struct avg_val __new; \
- __new.avg_weight = \
- (__avg).avg_weight ? \
- ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
- ((__val) * (AVG_FACTOR))) / \
- (AVG_SAMPLES)) : \
- ((__val) * (AVG_FACTOR)); \
- __new.avg = __new.avg_weight / (AVG_FACTOR); \
- __new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR 1024
+#define RT2X00_EWMA_WEIGHT 8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+ unsigned long avg;
+
+ avg = ewma_read(ewma);
+ if (avg)
+ return -avg;
+
+ return DEFAULT_RSSI;
+}
static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
{
struct link_ant *ant = &rt2x00dev->link.ant;
- if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
- return ant->rssi_ant.avg;
+ if (rt2x00dev->link.qual.rx_success)
+ return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
return DEFAULT_RSSI;
}
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
{
- rt2x00dev->link.ant.rssi_ant.avg = 0;
- rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+ ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
}
static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
/*
* Update global RSSI
*/
- link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+ ewma_add(&link->avg_rssi, -rxdesc->rssi);
/*
* Update antenna RSSI
*/
- ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+ ewma_add(&ant->rssi_ant, -rxdesc->rssi);
}
void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
*/
rt2x00dev->link.count = 0;
memset(qual, 0, sizeof(*qual));
+ ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+ RT2X00_EWMA_WEIGHT);
/*
* Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
* collect the RSSI data we could use this. Otherwise we
* must fallback to the default RSSI value.
*/
- if (!link->avg_rssi.avg || !qual->rx_success)
+ if (!qual->rx_success)
qual->rssi = DEFAULT_RSSI;
else
- qual->rssi = link->avg_rssi.avg;
+ qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
/*
* Check if link tuning is supported by the hardware, some hardware
* do not support link tuning at all, while other devices can disable
* the feature from the EEPROM.
*/
- if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_link_tuning(rt2x00dev))
rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
/*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
- if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
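
Note: the conversion above drops the driver's home-grown walking average in favour of the kernel EWMA helper from <linux/average.h>. Since struct ewma only tracks unsigned values, the (negative) RSSI is stored negated and flipped back on read, with DEFAULT_RSSI returned while no samples have been collected. A minimal sketch of the pattern, assuming the ewma_init/ewma_add/ewma_read API used above (rssi_sample is an illustrative stand-in for rxdesc->rssi):

	struct ewma avg_rssi;
	int rssi_sample = -62;				/* example RSSI in dBm */
	unsigned long raw;
	int avg;

	ewma_init(&avg_rssi, RT2X00_EWMA_FACTOR, RT2X00_EWMA_WEIGHT);	/* 1024, 8 */
	ewma_add(&avg_rssi, -rssi_sample);		/* store as positive: 62 */

	/* weight 8 gives avg = (old * 7 + new) / 8; factor 1024 keeps the
	 * fractional part internally so repeated rounding does not drift. */
	raw = ewma_read(&avg_rssi);
	avg = raw ? -(int)raw : DEFAULT_RSSI;		/* back to dBm, or the default */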
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f883802f3505..7c157857f5ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
* of different types, but has no separate filter for PS Poll frames,
* the FIF_CONTROL flag implies FIF_PSPOLL.
*/
- if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
*total_flags |= FIF_CONTROL | FIF_PSPOLL;
}
- if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
if (*total_flags & FIF_CONTROL)
*total_flags |= FIF_PSPOLL;
}
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
return -EOPNOTSUPP;
/*
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_flush_queue(queue, drop);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index dc49e525ae5e..25da20e7e1f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -119,7 +119,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
rt2x00dev->ops = ops;
rt2x00dev->hw = hw;
rt2x00dev->irq = pci_dev->irq;
- rt2x00dev->name = pci_name(pci_dev);
+ rt2x00dev->name = ops->name;
if (pci_is_pcie(pci_dev))
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c8a33b6ee22..50590b1420a5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
* at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
head_size += 8;
tail_size += 8;
}
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
- bool started;
bool tx_queue =
(queue->qid == QID_AC_VO) ||
(queue->qid == QID_AC_VI) ||
(queue->qid == QID_AC_BE) ||
(queue->qid == QID_AC_BK);
- mutex_lock(&queue->status_lock);
/*
- * If the queue has been started, we must stop it temporarily
- * to prevent any new frames to be queued on the device. If
- * we are not dropping the pending frames, the queue must
- * only be stopped in the software and not the hardware,
- * otherwise the queue will never become empty on its own.
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
*/
- started = test_bit(QUEUE_STARTED, &queue->flags);
- if (started) {
- /*
- * Pause the queue
- */
- rt2x00queue_pause_queue(queue);
-
- /*
- * If we are not supposed to drop any pending
- * frames, this means we must force a start (=kick)
- * to the queue to make sure the hardware will
- * start transmitting.
- */
- if (!drop && tx_queue)
- queue->rt2x00dev->ops->lib->kick_queue(queue);
- }
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
/*
* Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
if (unlikely(!rt2x00queue_empty(queue)))
rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
queue->qid);
-
- /*
- * Restore the queue to the previous status
- */
- if (started)
- rt2x00queue_unpause_queue(queue);
-
- mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 88289873c0cf..4e121627925d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
+ rt2x00queue_stop_queue(queue);
rt2x00queue_flush_queue(queue, true);
+ rt2x00queue_start_queue(queue);
}
static int rt2x00usb_dma_timeout(struct data_queue *queue)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 54d3ddfc9888..a5b69cb49012 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else if (rt2x00_rf(rt2x00dev, RF2529)) {
- if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_double_antenna(rt2x00dev))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
} else {
low_bound = 0x20;
up_bound = 0x40;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* eeprom word.
*/
if (rt2x00_rf(rt2x00dev, RF2529) &&
- !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+ !rt2x00_has_cap_double_antenna(rt2x00dev)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+ if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
spec->num_channels = 14;
spec->channels = rf_vals_noseq;
} else {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1d3880e09a13..1baf9c896dcd 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
- temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
- && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+ (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+ !rt2x00_has_cap_frame_type(rt2x00dev));
/*
* Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
sel = antenna_sel_bg;
- lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+ lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain = 0;
if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
low_bound = 0x28;
up_bound = 0x48;
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
low_bound += 0x10;
up_bound += 0x10;
}
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
up_bound = 0x1c;
}
- if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
low_bound += 0x14;
up_bound += 0x10;
}
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
}
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
- if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+ if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
} else {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index fc207b268e4f..a91506b12a62 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 8bb4a9a01a18..9a78e3daf742 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1613,6 +1613,35 @@ err_free:
}
EXPORT_SYMBOL(rtl_send_smps_action);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
/* There seem to be issues in mac80211 regarding when del ba frames can be
* received. As a work around, we make a fake del_ba if we receive a ba_req;
* however, rx_agg was opened to let mac80211 release some ba related
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 0e5fe0902daf..0cd07420777a 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index 35e00086a520..0105e6c1901e 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -41,12 +41,12 @@
#define CAM_CONFIG_USEDK 1
#define CAM_CONFIG_NO_USEDK 0
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id);
+ u32 ul_key_id);
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 733b7ce7f0e2..210ce7cd94d8 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
/*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mac->p2p = 0;
mac->vif = NULL;
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_LINKED;
mac->cnt_after_linked = 0;
mac->assoc_id = bss_conf->aid;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
if (rtlpriv->cfg->ops->linked_set_reg)
rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
- memset(mac->bssid, 0, 6);
+ memset(mac->bssid, 0, ETH_ALEN);
mac->vendor = PEER_UNKNOWN;
if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
- memcpy(mac->bssid, bss_conf->bssid, 6);
+ memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
rcu_read_lock();
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 838a1ed3f194..ae13fb94b2e8 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -1203,20 +1203,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
- int continual = true;
u16 efuse_addr = 0;
u8 hworden;
u8 efuse_data, word_cnts;
- while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
- && (efuse_addr < EFUSE_MAX_SIZE)) {
- if (efuse_data != 0xFF) {
- hworden = efuse_data & 0x0F;
- word_cnts = efuse_calculate_word_cnts(hworden);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- continual = false;
- }
+ while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ efuse_addr < EFUSE_MAX_SIZE) {
+ if (efuse_data == 0xFF)
+ break;
+
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
return efuse_addr;
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 395a326acfb4..1663b3afd41e 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -104,20 +104,19 @@ struct efuse_priv {
u8 tx_power_g[14];
};
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
- u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
- u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 703f839af6ca..0f494444bcd1 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
fail1:
if (hw)
ieee80211_free_hw(hw);
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtl_pci_disable_aspm(hw);
- pci_set_drvdata(pdev, NULL);
-
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index b68cae3024fc..e06971be7df7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
} else {
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index e655c0473225..d67f9c731cc4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
&bw40_pwr[0], channel);
}
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
index f1acd6d27e44..89f0f1ef1465 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -200,37 +200,35 @@ enum _ANT_DIV_TYPE {
CGCS_RX_SW_ANTDIV = 0x05,
};
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index c254693a1e6a..347af1e4f438 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.set_bw_mode = rtl88e_phy_set_bw_mode,
.switch_channel = rtl88e_phy_sw_chnl,
.dm_watchdog = rtl88e_dm_watchdog,
- .scan_operation_backup = rtl88e_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl88e_phy_set_rf_power_state,
.led_control = rtl88ee_led_control,
.set_desc = rtl88ee_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 68685a898257..aece6c9cccf1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index d2d57a27a7c1..e9caa5d4cff0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
- u8 h2c_parameter[3] = { 0 };
-
- return;
-
- if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
- } else {
- rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
- }
-
- if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
- } else {
- rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
- }
-
- h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
- h2c_parameter[0] = 0;
-
- rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
}
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
s8 cck_index = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
- s8 txpwr_level[2] = {0, 0};
+ s8 txpwr_level[3] = {0, 0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 246e5352f2e1..0c0e78263a66 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index cec10d696492..e79dabe9ba1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 3cfa1bb0f476..fa24de43ce79 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -152,8 +152,6 @@ enum version_8192c {
#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
- ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
#define IS_CHIP_VENDOR_UMC(version) \
((version & CHIP_VENDOR_UMC) ? true : false)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index d5e3b704f930..94486cca4000 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -188,36 +186,29 @@ struct tx_power_struct {
};
bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
+ enum nl80211_channel_type ch_type);
void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
- u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
- u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+ u32 rfpath);
bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+ enum rf_pwrstate rfpwr_state);
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 offset);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
+ enum radio_path rfpath, u32 offset);
u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset, u32 data);
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset,
- u32 data);
+ enum radio_path rfpath, u32 offset,
+ u32 data);
void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data);
+ u32 regaddr, u32 bitmask, u32 data);
bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index bd4aef74c056..8922ecb47ad2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -560,7 +560,6 @@
#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
#define EEPROM_DEFAULT_HT20_DIFF 2
-#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
@@ -639,17 +638,8 @@
#define EEPROM_TXPWR_GROUP 0x6F
-#define EEPROM_TSSI_A 0x76
-#define EEPROM_TSSI_B 0x77
-#define EEPROM_THERMAL_METER 0x78
-
#define EEPROM_CHANNELPLAN 0x75
-#define RF_OPTION1 0x79
-#define RF_OPTION2 0x7A
-#define RF_OPTION3 0x7B
-#define RF_OPTION4 0x7C
-
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
@@ -689,13 +679,6 @@
#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524
-#define REG_USB_INFO 0xFE17
-#define REG_USB_SPECIAL_OPTION 0xFE55
-
-#define REG_USB_DMA_AGG_TO 0xFE5B
-#define REG_USB_AGG_TO 0xFE5C
-#define REG_USB_AGG_TH 0xFE5D
-
#define REG_USB_VID 0xFE60
#define REG_USB_PID 0xFE62
#define REG_USB_OPTIONAL 0xFE64
@@ -1196,9 +1179,6 @@
#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000
-#define MAX_MSS_DENSITY_2T 0x13
-#define MAX_MSS_DENSITY_1T 0x0A
-
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 6c8d56efceae..d8fe68b389d2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,11 +34,10 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 14203561b6ee..b790320d2030 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92c_phy_set_rf_power_state,
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 6ad23b413eb3..52abf0a862fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index da4f587199ee..393685390f3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -32,6 +32,7 @@
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
+#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
return ret_val;
}
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
long currsig)
{
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
(hw, total_rssi /= rf_rx_num));
}
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi =
- rtlpriv->stats.ui_rssi.elements[rtlpriv->
- stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
- index++] = pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength =
- _rtl92c_translate_todbm(hw, (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself) {
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
- continue;
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
-
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] +
- 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.
- rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power =
- (rtlpriv->stats.recv_signal_power * 5 +
- pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb = 0;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- return;
- } else {
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- }
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb += 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92c_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm = 0, n_stream, tmpval;
-
- if (pstats->signalquality != 0) {
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.LINK_Q.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.LINK_Q.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm =
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index];
- rtlpriv->stats.LINK_Q.total_val -=
- last_evm;
- }
- rtlpriv->stats.LINK_Q.total_val +=
- pstats->signalquality;
- rtlpriv->stats.LINK_Q.elements
- [rtlpriv->stats.LINK_Q.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.LINK_Q.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.LINK_Q.index = 0;
- tmpval = rtlpriv->stats.LINK_Q.total_val /
- rtlpriv->stats.LINK_Q.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- for (n_stream = 0; n_stream < 2;
- n_stream++) {
- if (pstats->RX_SIGQ[n_stream] != -1) {
- if (!rtlpriv->stats.RX_EVM[n_stream]) {
- rtlpriv->stats.RX_EVM[n_stream]
- = pstats->RX_SIGQ[n_stream];
- }
- rtlpriv->stats.RX_EVM[n_stream] =
- ((rtlpriv->stats.RX_EVM
- [n_stream] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->RX_SIGQ
- [n_stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
- }
- } else {
- ;
- }
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
- _rtl92c_process_ui_rssi(hw, pcurrent_stats);
- _rtl92c_process_pwdb(hw, pcurrent_stats);
- _rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself,
packet_beacon);
- _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+ rtl_process_phyinfo(hw, tmp_buf, pstats);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 090fd33a158d..11b439d6b671 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -34,15 +34,14 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
+ enum radio_path rfpath);
void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel);
void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 2bd598526217..9936de716ad5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../usb.h"
#include "../efuse.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
- .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
.led_control = rtl92cu_led_control,
.enable_hw_sec = rtl92cu_enable_hw_security_config,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 5a060e537fbe..25e50ffc44ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -350,7 +350,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
@@ -365,7 +364,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
u8 *rxdesc;
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
struct rx_fwinfo_92c *p_drvinfo;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index f700f7a614b2..7908e1c85819 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[2];
+ u8 ofdm_index[3];
s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
+ u8 ofdm_index_old[3] = {0, 0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
+ if (cck_index >= CCK_TABLE_SIZE)
+ cck_index = CCK_TABLE_SIZE - 1;
+ if (cck_index < 0)
+ cck_index = 0;
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 7dd8f6de0550..c4a7db9135d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
* mac80211 will send pkt when scan */
void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92d_dm_init_edca_turbo(hw);
- return;
- switch (aci) {
- case AC1_BK:
- rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
- break;
- case AC0_BE:
- break;
- case AC2_VI:
- rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
- break;
- case AC3_VO:
- rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
- break;
- default:
- RT_ASSERT(false, "invalid aci: %d !\n", aci);
- break;
- }
}
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
index 7c9f7a2f1e42..1bc7b1a96d4a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all);
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
- u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+ u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
void rtl92de_suspend(struct ieee80211_hw *hw);
void rtl92de_resume(struct ieee80211_hw *hw);
void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 840bac5fa2f8..13196cc4b1d3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
}
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- rtlhal->current_bandtypebackup =
- rtlhal->current_bandtype;
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation\n");
- break;
- }
- }
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
index f074952bf25c..48d5c6835b6a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -39,9 +39,7 @@
#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define IQK_ADDA_REG_NUM 16
#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
@@ -127,34 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
*flag);
}
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum rf_content content,
enum radio_path rfpath);
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
unsigned long *flag);
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
index 0fe1a48593e8..7303d12c266f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -30,15 +30,13 @@
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
- bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index c18c04bf0c13..edab5a5351b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
.dm_watchdog = rtl92d_dm_watchdog,
- .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index b8ec718a0fab..945ddecf90c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -526,7 +526,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 84d1181795b8..c81c83591940 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -425,14 +425,9 @@
#define EXT_IMEM_CODE_DONE BIT(2)
#define IMEM_CHK_RPT BIT(1)
#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CODE_DONE BIT(0)
-#define IMEM_CHK_RPT BIT(1)
#define EMEM_CODE_DONE BIT(2)
#define EMEM_CHK_RPT BIT(3)
-#define DMEM_CODE_DONE BIT(4)
#define IMEM_RDY BIT(5)
-#define BASECHG BIT(6)
-#define FWRDY BIT(7)
#define LOAD_FW_READY (IMEM_CODE_DONE | \
IMEM_CHK_RPT | \
EMEM_CODE_DONE | \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index c7095118de6e..222d2e792ca6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -330,7 +330,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->rssi + 10;
- /*rx_status->noise = -stats->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index eafbb18dd48e..5d318a85eda4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
return pwrout_dbm;
}
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- enum io_type iotype;
-
- if (!is_hal_stop(rtlhal)) {
- switch (operation) {
- case SCAN_OPT_BACKUP:
- iotype = IO_CMD_PAUSE_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
-
- break;
- case SCAN_OPT_RESTORE:
- iotype = IO_CMD_RESUME_DM_BY_SCAN;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_IO_CMD,
- (u8 *)&iotype);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Unknown Scan Backup operation.\n");
- break;
- }
- }
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index e7a59eba351a..007ebdbbe108 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -183,42 +183,40 @@ struct tx_power_struct {
u32 mcs_original_offset[4][16];
};
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
- enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
- long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
- u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
- long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
- u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+ u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
index d0f9dd79abea..57f1933ee663 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -32,12 +32,11 @@
#define RF6052_MAX_TX_PWR 0x3F
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
- u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index d9ee2efffe5f..62b204faf773 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -33,6 +33,7 @@
#include "../core.h"
#include "../pci.h"
+#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_bw_mode = rtl8723ae_phy_set_bw_mode,
.switch_channel = rtl8723ae_phy_sw_chnl,
.dm_watchdog = rtl8723ae_dm_watchdog,
- .scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
.led_control = rtl8723ae_led_control,
.set_desc = rtl8723ae_set_desc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index bcd82a1020a5..50b7be3f3a60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/*rx_status->qual = status->signal; */
rx_status->signal = status->recvsignalpower + 10;
- /*rx_status->noise = -status->noise; */
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e56778cac9bf..6e2b5c5c83c8 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status = {0};
struct rtl_stats stats = {
.signal = 0,
- .noise = -98,
.rate = 0,
};
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
unsigned int len)
{
+#if NET_IP_ALIGN != 0
unsigned int padding = 0;
+#endif
/* make function no-op when possible */
if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
return 0;
+#if NET_IP_ALIGN != 0
/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
/* TODO: deduplicate common code, define helper function instead? */
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
padding ^= NET_IP_ALIGN;
return padding;
+#endif
}
#define __RADIO_TAP_SIZE_RSV 32
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 703258742d28..d224dc3bb092 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -192,8 +192,6 @@ enum hardware_type {
(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
#define IS_HARDWARE_TYPE_8723(rtlhal) \
(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal) \
- (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
#define RX_HAL_IS_CCK_RATE(_pdesc)\
(_pdesc->rxmcs == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index c7dc6feab2ff..1342f81e683d 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
struct wl1251 *wl;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (!pdata) {
wl1251_error("no platform data");
return -ENODEV;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index fd02060038de..2c3bd1bff3f6 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 1c627da85083..be7129ba16ad 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -333,11 +333,11 @@ static struct wlcore_conf wl12xx_conf = {
.always = 0,
},
.fwlog = {
- .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 2,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
- .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
@@ -717,6 +717,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->fw_mem_block_size = 256;
+ wl->fwlog_end = 0x2000000;
+
/* common settings */
wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
@@ -1262,9 +1265,10 @@ static int wl12xx_boot(struct wl1271 *wl)
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
- MAX_TX_RETRY_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+ wl->ap_event_mask = MAX_TX_RETRY_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1648,6 +1652,11 @@ static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
return true;
}
+static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ return hwaddr << 5;
+}
+
static int wl12xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl12xx_ops = {
@@ -1684,6 +1693,7 @@ static struct wlcore_ops wl12xx_ops = {
.channel_switch = wl12xx_cmd_channel_switch,
.pre_pkt_send = NULL,
.set_peer_cap = wl12xx_set_peer_cap,
+ .convert_hwaddr = wl12xx_convert_hwaddr,
.lnk_high_prio = wl12xx_lnk_high_prio,
.lnk_low_prio = wl12xx_lnk_low_prio,
};
@@ -1704,7 +1714,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
- struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
wl->rtable = wl12xx_rtable;
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7aa0eb848c5a..ec37b16585df 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -456,11 +456,11 @@ static struct wlcore_conf wl18xx_conf = {
.always = 0,
},
.fwlog = {
- .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 2,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
- .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
@@ -505,7 +505,7 @@ static struct wlcore_conf wl18xx_conf = {
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.ht = {
- .mode = HT_MODE_DEFAULT,
+ .mode = HT_MODE_WIDE,
},
.phy = {
.phy_standalone = 0x00,
@@ -516,7 +516,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
.low_band_component = COMPONENT_3_WAY_SWITCH,
- .low_band_component_type = 0x04,
+ .low_band_component_type = 0x05,
.high_band_component = COMPONENT_2_WAY_SWITCH,
.high_band_component_type = 0x09,
.tcxo_ldo_voltage = 0x00,
@@ -556,15 +556,15 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff },
.psat = 0,
- .low_power_val = 0x08,
- .med_power_val = 0x12,
- .high_power_val = 0x18,
- .low_power_val_2nd = 0x05,
- .med_power_val_2nd = 0x0a,
- .high_power_val_2nd = 0x14,
.external_pa_dc2dc = 0,
.number_of_assembled_ant2_4 = 2,
.number_of_assembled_ant5 = 1,
+ .low_power_val = 0xff,
+ .med_power_val = 0xff,
+ .high_power_val = 0xff,
+ .low_power_val_2nd = 0xff,
+ .med_power_val_2nd = 0xff,
+ .high_power_val_2nd = 0xff,
.tx_rf_margin = 1,
},
};
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
[REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
};
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
+ [CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
+ [CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
+};
+
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
[CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
@@ -674,6 +686,9 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
goto out;
}
+ wl->fw_mem_block_size = 272;
+ wl->fwlog_end = 0x40000000;
+
wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
@@ -704,6 +719,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+ /* coex PLL configuration */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+ wl18xx_clk_table_coex[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+ wl18xx_clk_table_coex[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ /* bypass the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
wl18xx_clk_table[clk_freq].n);
if (ret < 0)
@@ -745,6 +777,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
}
+ /* choose WCS PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+ PLLSH_WL_PLL_SEL_WCS_PLL);
+ if (ret < 0)
+ goto out;
+
+ /* enable both PLLs */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+ if (ret < 0)
+ goto out;
+
+ udelay(1000);
+
+ /* disable coex PLL */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
+ /* reset the swallowing logic */
+ ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+ PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
@@ -935,10 +991,11 @@ static int wl18xx_boot(struct wl1271 *wl)
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
- MAX_TX_FAILURE_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID |
DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+ wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
+
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
@@ -1175,16 +1232,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
}
}
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+ switch (rdl_num) {
+ case RDL_1_HP:
+ return "183xH";
+ case RDL_2_SP:
+ return "183x or 180x";
+ case RDL_3_HP:
+ return "187xH";
+ case RDL_4_SP:
+ return "187x";
+ case RDL_5_SP:
+ return "RDL11 - Not Supported";
+ case RDL_6_SP:
+ return "180xD";
+ case RDL_7_SP:
+ return "RDL13 - Not Supported (1893Q)";
+ case RDL_8_SP:
+ return "18xxQ";
+ case RDL_NONE:
+ return "UNTRIMMED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u32 fuse;
- s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+ s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
if (ret < 0)
goto out;
@@ -1192,7 +1281,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
- if (rom <= 0xE)
+ if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
metal = (fuse & WL18XX_METAL_VER_MASK) >>
WL18XX_METAL_VER_OFFSET;
else
@@ -1204,11 +1293,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
goto out;
rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
- if (rdl_ver > RDL_MAX)
- rdl_ver = RDL_NONE;
- wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
- rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+ wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+ wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
if (ver)
*ver = pg_ver;
@@ -1521,6 +1608,11 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
return lnk->allocated_pkts < thold;
}
+static u32 wl18xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ return hwaddr & ~0x80000000;
+}
+
static int wl18xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl18xx_ops = {
@@ -1558,6 +1650,7 @@ static struct wlcore_ops wl18xx_ops = {
.pre_pkt_send = wl18xx_pre_pkt_send,
.sta_rc_update = wl18xx_sta_rc_update,
.set_peer_cap = wl18xx_set_peer_cap,
+ .convert_hwaddr = wl18xx_convert_hwaddr,
.lnk_high_prio = wl18xx_lnk_high_prio,
.lnk_low_prio = wl18xx_lnk_low_prio,
};
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 05dd8bad2746..a433a75f3cd7 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -114,6 +114,11 @@
#define PLATFORM_DETECTION 0xA0E3E0
#define OCS_EN 0xA02080
#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_COEX_PLL_N 0xA02384
+#define PLLSH_COEX_PLL_M 0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN 0xA0238E
+#define PLLSH_WL_PLL_SEL 0xA02398
+
#define PLLSH_WCS_PLL_N 0xA02362
#define PLLSH_WCS_PLL_M 0xA02360
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
@@ -128,19 +133,30 @@
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+#define PLLSH_WL_PLL_EN_VAL1 0x7
+#define PLLSH_WL_PLL_EN_VAL2 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
+
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+#define PLLSH_WL_PLL_SEL_WCS_PLL 0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL 0x1
+
#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
#define WL18XX_PG_VER_MASK 0x70
#define WL18XX_PG_VER_OFFSET 4
-#define WL18XX_ROM_VER_MASK 0x3
-#define WL18XX_ROM_VER_OFFSET 0
+#define WL18XX_ROM_VER_MASK 0x3e00
+#define WL18XX_ROM_VER_OFFSET 9
#define WL18XX_METAL_VER_MASK 0xC
#define WL18XX_METAL_VER_OFFSET 2
#define WL18XX_NEW_METAL_VER_MASK 0x180
#define WL18XX_NEW_METAL_VER_OFFSET 7
+#define WL18XX_PACKAGE_TYPE_OFFSET 13
+#define WL18XX_PACKAGE_TYPE_WSP 0
+
#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
#define WL18XX_RDL_VER_MASK 0x1f00
#define WL18XX_RDL_VER_OFFSET 8
@@ -201,24 +217,21 @@ enum {
NUM_BOARD_TYPES,
};
-enum {
+enum wl18xx_rdl_num {
RDL_NONE = 0,
RDL_1_HP = 1,
RDL_2_SP = 2,
RDL_3_HP = 3,
RDL_4_SP = 4,
+ RDL_5_SP = 0x11,
+ RDL_6_SP = 0x12,
+ RDL_7_SP = 0x13,
+ RDL_8_SP = 0x14,
_RDL_LAST,
RDL_MAX = _RDL_LAST - 1,
};
-static const char * const rdl_names[] = {
- [RDL_NONE] = "",
- [RDL_1_HP] = "1853 SISO",
- [RDL_2_SP] = "1857 MIMO",
- [RDL_3_HP] = "1893 SISO",
- [RDL_4_SP] = "1897 MIMO",
-};
/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
#define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 7a970cd9c555..ec83675a2446 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -162,7 +162,8 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
wl1271_debug(DEBUG_ACX, "acx mem map");
- ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len);
+ ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map,
+ sizeof(struct acx_header), len);
if (ret < 0)
return ret;
@@ -722,6 +723,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
wl1271_debug(DEBUG_ACX, "acx statistics");
ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
+ sizeof(struct acx_header),
wl->stats.fw_stats_len);
if (ret < 0) {
wl1271_warning("acx statistics failed: %d", ret);
@@ -1470,8 +1472,8 @@ int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
tsf_info->role_id = wlvif->role_id;
- ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO,
- tsf_info, sizeof(*tsf_info));
+ ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info,
+ sizeof(struct acx_header), sizeof(*tsf_info));
if (ret < 0) {
wl1271_warning("acx tsf info interrogate failed");
goto out;
@@ -1752,7 +1754,7 @@ int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
acx->role_id = wlvif->role_id;
ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL,
- acx, sizeof(*acx));
+ acx, sizeof(*acx), sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx roaming statistics failed: %d", ret);
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index c9e060795d13..34d9dfff2ad3 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,7 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+ if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -845,7 +846,8 @@ EXPORT_SYMBOL_GPL(wl1271_cmd_test);
* @buf: buffer for the response, including all headers, must work with dma
* @len: length of buf
*/
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+ size_t cmd_len, size_t res_len)
{
struct acx_header *acx = buf;
int ret;
@@ -854,10 +856,10 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
acx->id = cpu_to_le16(id);
- /* payload length, does not include any headers */
- acx->len = cpu_to_le16(len - sizeof(*acx));
+ /* response payload length, does not include any headers */
+ acx->len = cpu_to_le16(res_len - sizeof(*acx));
- ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
+ ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, cmd_len, res_len);
if (ret < 0)
wl1271_error("INTERROGATE command failed");
@@ -1126,6 +1128,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 template_id_2_4 = wl->scan_templ_id_2_4;
u16 template_id_5 = wl->scan_templ_id_5;
+ wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie_len);
if (!skb) {
@@ -1135,8 +1139,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (ie_len)
memcpy(skb_put(skb, ie_len), ie, ie_len);
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1174,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
if (!skb)
goto out;
- wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+ wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1609,43 @@ out:
static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
{
- int idx = -1;
-
+ /*
+ * map the given band/channel to the respective predefined
+ * bit expected by the fw
+ */
switch (band) {
- case IEEE80211_BAND_5GHZ:
- if (ch >= 8 && ch <= 16)
- idx = ((ch-8)/4 + 18);
- else if (ch >= 34 && ch <= 64)
- idx = ((ch-34)/2 + 3 + 18);
- else if (ch >= 100 && ch <= 140)
- idx = ((ch-100)/4 + 15 + 18);
- else if (ch >= 149 && ch <= 165)
- idx = ((ch-149)/4 + 26 + 18);
- else
- idx = -1;
- break;
case IEEE80211_BAND_2GHZ:
+ /* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
- idx = ch - 1;
- else
- idx = -1;
+ return ch - 1;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (ch) {
+ case 8 ... 16:
+ /* channels 8,12,16 are mapped to 18,19,20 */
+ return 18 + (ch-8)/4;
+ case 34 ... 48:
+ /* channels 34,36..48 are mapped to 21..28 */
+ return 21 + (ch-34)/2;
+ case 52 ... 64:
+ /* channels 52,56..64 are mapped to 29..32 */
+ return 29 + (ch-52)/4;
+ case 100 ... 140:
+ /* channels 100,104..140 are mapped to 33..43 */
+ return 33 + (ch-100)/4;
+ case 149 ... 165:
+ /* channels 149,153..165 are mapped to 44..48 */
+ return 44 + (ch-149)/4;
+ default:
+ break;
+ }
break;
default:
- wl1271_error("get reg conf ch idx - unknown band: %d",
- (int)band);
+ break;
}
- return idx;
+ wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+ return -1;
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1658,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
- if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+ if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
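The rewritten channel-to-bit-index mapping above packs both bands into the single index space the firmware expects. A few sample values, as an illustrative sanity-check sketch (the helper below is hypothetical; only the mapping itself comes from the patch):

	/* Hypothetical self-test of the band/channel -> reg-conf bit mapping. */
	static void wlcore_check_reg_conf_ch_idx_examples(void)
	{
		/* 2.4 GHz: channels 1..14 map to bits 0..13 */
		WARN_ON(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_2GHZ, 1) != 0);
		WARN_ON(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_2GHZ, 14) != 13);
		/* 5 GHz: ch 36 -> 21 + (36-34)/2 = 22, ch 100 -> 33 + 0 = 33 */
		WARN_ON(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 36) != 22);
		WARN_ON(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 100) != 33);
		/* channels outside the listed ranges log an error and return -1 */
		WARN_ON(wlcore_get_reg_conf_ch_idx(IEEE80211_BAND_5GHZ, 17) != -1);
	}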
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index fd34123047cd..323d4a856e4b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -45,7 +45,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum ieee80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
-int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
+int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
+ size_t cmd_len, size_t res_len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
size_t len, unsigned long valid_rets);
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 2b96ff821341..40995c42bef8 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1274,6 +1274,9 @@ struct conf_rx_streaming_settings {
u8 always;
} __packed;
+#define CONF_FWLOG_MIN_MEM_BLOCKS 2
+#define CONF_FWLOG_MAX_MEM_BLOCKS 16
+
struct conf_fwlog {
/* Continuous or on-demand */
u8 mode;
@@ -1281,7 +1284,7 @@ struct conf_fwlog {
/*
* Number of memory blocks dedicated for the FW logger
*
- * Range: 1-3, or 0 to disable the FW logger
+ * Range: 2-16, or 0 to disable the FW logger
*/
u8 mem_blocks;
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index e17630c2a849..89893c717025 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -437,6 +437,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
int res = 0;
ssize_t ret;
char *buf;
+ struct wl12xx_vif *wlvif;
#define DRIVER_STATE_BUF_LEN 1024
@@ -450,12 +451,28 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
+#define DRIVER_STATE_PRINT_GENERIC(x, fmt, args...) \
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
+ #x " = " fmt "\n", args))
+
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
#define DRIVER_STATE_PRINT_INT(x) DRIVER_STATE_PRINT(x, "%d")
#define DRIVER_STATE_PRINT_STR(x) DRIVER_STATE_PRINT(x, "%s")
#define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx")
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
+
+ DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+ wlvif->p2p ? "P2P-CL" : "STA");
+ }
+
+ wl12xx_for_each_wlvif_ap(wl, wlvif)
+ DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
+ wlvif->p2p ? "P2P-GO" : "AP");
+
DRIVER_STATE_PRINT_INT(tx_blocks_available);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
@@ -474,7 +491,6 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(channel);
DRIVER_STATE_PRINT_INT(band);
DRIVER_STATE_PRINT_INT(power_level);
DRIVER_STATE_PRINT_INT(sg_enabled);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 67f61689b49e..8d3b34965db3 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -266,6 +266,7 @@ int wl1271_event_unmask(struct wl1271 *wl)
{
int ret;
+ wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 7fd260c02a0a..51f8d634d32f 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -222,6 +222,15 @@ wlcore_hw_set_peer_cap(struct wl1271 *wl,
return 0;
}
+static inline u32
+wlcore_hw_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
+{
+ if (!wl->ops->convert_hwaddr)
+ BUG_ON(1);
+
+ return wl->ops->convert_hwaddr(wl, hwaddr);
+}
+
static inline bool
wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5c6f11e157d9..7699f9d07e26 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -571,6 +571,12 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
if (ret < 0)
return ret;
+
+ /* unmask ap events */
+ wl->event_mask |= wl->ap_event_mask;
+ ret = wl1271_event_unmask(wl);
+ if (ret < 0)
+ return ret;
/* first STA, no APs */
} else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index af7d9f9b3b4d..07e3d6a049ad 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -165,8 +165,8 @@ static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
int physical;
int addr;
- /* Addresses are stored internally as addresses to 32 bytes blocks */
- addr = hwaddr << 5;
+ /* Convert from FW internal address which is chip arch dependent */
+ addr = wl->ops->convert_hwaddr(wl, hwaddr);
physical = wlcore_translate_addr(wl, addr);
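The convert_hwaddr hook replaces the fixed 32-byte-block shift that io.h used to hard-code. A minimal sketch of what a wl12xx-family implementation could look like, assuming it keeps the old block-number encoding (illustrative, not part of this hunk; the wl18xx counterpart is wired into wl18xx_ops above):

	/* Sketch: FW addresses kept as 32-byte block numbers, as before. */
	static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
	{
		return hwaddr << 5;
	}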
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 38995f90040d..0368b9cbfb89 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -44,6 +44,7 @@
#define WL1271_BOOT_RETRIES 3
static char *fwlog_param;
+static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;
@@ -291,6 +292,18 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
{
/* Adjust settings according to optional module parameters */
+ /* Firmware Logger params */
+ if (fwlog_mem_blocks != -1) {
+ if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
+ fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
+ wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
+ } else {
+ wl1271_error(
+ "Illegal fwlog_mem_blocks=%d using default %d",
+ fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
+ }
+ }
+
if (fwlog_param) {
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
@@ -780,6 +793,7 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
if (wl->state == WLCORE_STATE_ON) {
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+ wl1271_ps_elp_wakeup(wl);
wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
@@ -787,19 +801,10 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
- size_t len = 0;
-
- /* The FW log is a length-value list, find where the log end */
- while (len < maxlen) {
- if (memblock[len] == 0)
- break;
- if (len + memblock[len] + 1 > maxlen)
- break;
- len += memblock[len] + 1;
- }
+ size_t len;
/* Make sure we have enough room */
- len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
+ len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -808,10 +813,9 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
return len;
}
-#define WLCORE_FW_LOG_END 0x2000000
-
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
+ struct wlcore_partition_set part, old_part;
u32 addr;
u32 offset;
u32 end_of_log;
@@ -824,7 +828,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl1271_info("Reading FW panic log");
- block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
+ block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
if (!block)
return;
@@ -850,17 +854,31 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
- end_of_log = WLCORE_FW_LOG_END;
+ end_of_log = wl->fwlog_end;
} else {
offset = sizeof(addr);
end_of_log = addr;
}
+ old_part = wl->curr_part;
+ memset(&part, 0, sizeof(part));
+
/* Traverse the memory blocks linked list */
do {
- memset(block, 0, WL12XX_HW_BLOCK_SIZE);
- ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
- false);
+ part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
+ part.mem.size = PAGE_SIZE;
+
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0) {
+ wl1271_error("%s: set_partition start=0x%X size=%d",
+ __func__, part.mem.start, part.mem.size);
+ goto out;
+ }
+
+ memset(block, 0, wl->fw_mem_block_size);
+ ret = wlcore_read_hwaddr(wl, addr, block,
+ wl->fw_mem_block_size, false);
+
if (ret < 0)
goto out;
@@ -871,8 +889,9 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* on demand mode and is equal to 0x2000000 in continuous mode.
*/
addr = le32_to_cpup((__le32 *)block);
+
if (!wl12xx_copy_fwlog(wl, block + offset,
- WL12XX_HW_BLOCK_SIZE - offset))
+ wl->fw_mem_block_size - offset))
break;
} while (addr && (addr != end_of_log));
@@ -880,6 +899,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
out:
kfree(block);
+ wlcore_set_partition(wl, &old_part);
}
static void wlcore_print_recovery(struct wl1271 *wl)
@@ -924,7 +944,8 @@ static void wl1271_recovery_work(struct work_struct *work)
goto out_unlock;
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
- wl12xx_read_fwlog_panic(wl);
+ if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
+ wl12xx_read_fwlog_panic(wl);
wlcore_print_recovery(wl);
}
@@ -1062,7 +1083,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
static const char* const PLT_MODE[] = {
"PLT_OFF",
"PLT_ON",
- "PLT_FEM_DETECT"
+ "PLT_FEM_DETECT",
+ "PLT_CHIP_AWAKE"
};
int ret;
@@ -1088,9 +1110,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
if (ret < 0)
goto power_off;
- ret = wl->ops->plt_init(wl);
- if (ret < 0)
- goto power_off;
+ if (plt_mode != PLT_CHIP_AWAKE) {
+ ret = wl->ops->plt_init(wl);
+ if (ret < 0)
+ goto power_off;
+ }
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -1925,8 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
/*
* FW channels must be re-calibrated after recovery,
- * clear the last Reg-Domain channel configuration.
+ * save current Reg-Domain channel configuration and clear it.
*/
+ memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
+ sizeof(wl->reg_ch_conf_pending));
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
@@ -2008,6 +2034,47 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
+ unsigned long time_spare;
+ int ret;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wlvif = container_of(dwork, struct wl12xx_vif,
+ pending_auth_complete_work);
+ wl = wlvif->wl;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ /*
+ * Make sure a second really passed since the last auth reply. Maybe
+ * a second auth reply arrived while we were stuck on the mutex.
+ * Check for a little less than the timeout to protect from scheduler
+ * irregularities.
+ */
+ time_spare = jiffies +
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+ if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* cancel the ROC if active */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2226,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
wlcore_connection_loss_work);
+ INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+ wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2445,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 role_type;
+ if (wl->plt) {
+ wl1271_error("Adding Interface not allowed while in PLT mode");
+ return -EBUSY;
+ }
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -2572,6 +2646,12 @@ deinit:
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
goto unlock;
+ if (wl->ap_count == 0 && is_ap) {
+ /* mask ap events */
+ wl->event_mask &= ~wl->ap_event_mask;
+ wl1271_event_unmask(wl);
+ }
+
if (wl->ap_count == 0 && is_ap && wl->sta_count) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
/* Configure for power according to debugfs */
@@ -2590,6 +2670,7 @@ unlock:
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
+ cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
mutex_lock(&wl->mutex);
}
@@ -2875,6 +2956,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->rate_set = wlvif->basic_rate_set;
}
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
+{
+ bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return;
+
+ if (idle) {
+ clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ } else {
+ /* The current firmware only supports sched_scan in idle */
+ if (wl->sched_vif == wlvif)
+ wl->ops->sched_scan_stop(wl, wlvif);
+
+ set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+ }
+}
+
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
@@ -3969,6 +4069,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
}
} else {
if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ /*
+ * AP might be in ROC in case we have just
+ * sent auth reply. handle it.
+ */
+ if (test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
+
ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
@@ -4120,6 +4227,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE && !is_ibss)
+ wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4766,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif,
- struct wl1271_station *wl_sta,
- bool in_connection)
+/*
+ * when wl_sta is NULL, we treat this call as if coming from a
+ * pending auth reply.
+ * wl->mutex must be taken and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn)
{
- if (in_connection) {
- if (WARN_ON(wl_sta->in_connection))
+ if (in_conn) {
+ if (WARN_ON(wl_sta && wl_sta->in_connection))
return;
- wl_sta->in_connection = true;
- if (!wlvif->inconn_count++)
+
+ if (!wlvif->ap_pending_auth_reply &&
+ !wlvif->inconn_count)
wlcore_roc_if_possible(wl, wlvif);
+
+ if (wl_sta) {
+ wl_sta->in_connection = true;
+ wlvif->inconn_count++;
+ } else {
+ wlvif->ap_pending_auth_reply = true;
+ }
} else {
- if (!wl_sta->in_connection)
+ if (wl_sta && !wl_sta->in_connection)
return;
- wl_sta->in_connection = false;
- wlvif->inconn_count--;
- if (WARN_ON(wlvif->inconn_count < 0))
+ if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
return;
- if (!wlvif->inconn_count)
- if (test_bit(wlvif->role_id, wl->roc_map))
- wl12xx_croc(wl, wlvif->role_id);
+ if (WARN_ON(wl_sta && !wlvif->inconn_count))
+ return;
+
+ if (wl_sta) {
+ wl_sta->in_connection = false;
+ wlvif->inconn_count--;
+ } else {
+ wlvif->ap_pending_auth_reply = false;
+ }
+
+ if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+ test_bit(wlvif->role_id, wl->roc_map))
+ wl12xx_croc(wl, wlvif->role_id);
}
}
@@ -5313,10 +5443,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
- { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +6023,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
};
#endif
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
- struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
unsigned long irqflags;
int ret;
+ irq_handler_t hardirq_fn = NULL;
if (fw) {
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6065,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
wl->platform_quirks = pdata->platform_quirks;
wl->if_ops = pdev_data->if_ops;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
irqflags = IRQF_TRIGGER_RISING;
- else
+ hardirq_fn = wlcore_hardirq;
+ } else {
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ }
- ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+ ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
irqflags, pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
@@ -6046,6 +6181,9 @@ module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, ondemand, dbgpins or disable");
+module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
+
module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 98066d40c2ad..26bfc365ba70 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
struct wl12xx_vif *wlvif;
u32 timeout;
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return;
+
if (wl->sleep_auth != WL1271_PSM_ELP)
return;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index f407101e525b..7ed86203304b 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -92,9 +92,31 @@ out:
static void wlcore_started_vifs_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool active = false;
int *count = (int *)data;
- if (!vif->bss_conf.idle)
+ /*
+ * count active interfaces according to interface type.
+ * checking only bss_conf.idle is bad for some cases, e.g.
+ * we don't want to count sta in p2p_find as active interface.
+ */
+ switch (wlvif->bss_type) {
+ case BSS_TYPE_STA_BSS:
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ active = true;
+ break;
+
+ case BSS_TYPE_AP_BSS:
+ if (wlvif->wl->active_sta_count > 0)
+ active = true;
+ break;
+
+ default:
+ break;
+ }
+
+ if (active)
(*count)++;
}
@@ -174,17 +196,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
- wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
- req_channels[i]->band,
- req_channels[i]->center_freq);
- wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
- req_channels[i]->hw_value,
- req_channels[i]->flags);
- wl1271_debug(DEBUG_SCAN, "max_power %d",
- req_channels[i]->max_power);
- wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
- min_dwell_time_active,
- max_dwell_time_active);
+
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +234,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
*n_pactive_ch);
}
+ wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+ req_channels[i]->center_freq,
+ req_channels[i]->hw_value,
+ req_channels[i]->flags,
+ req_channels[i]->max_power,
+ min_dwell_time_active,
+ max_dwell_time_active,
+ flags & IEEE80211_CHAN_RADAR ?
+ ", DFS" : "",
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ ", PASSIVE" : "");
j++;
}
}
@@ -364,7 +387,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct cfg80211_ssid *ssids = req->ssids;
int ret = 0, type, i, j, n_match_ssids = 0;
- wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+ wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
/* count the match sets that contain SSIDs */
for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +465,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
}
}
- wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 1b0cd98e35f1..b2c018dccf18 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
if (!pdev_data)
goto out;
- pdev_data->pdata = spi->dev.platform_data;
+ pdev_data->pdata = dev_get_platdata(&spi->dev);
if (!pdev_data->pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 527590f2adfb..ddad58f614da 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -179,7 +179,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out_sleep;
}
- ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
+ ret = wl1271_cmd_interrogate(wl, ie_id, cmd,
+ sizeof(struct acx_header), sizeof(*cmd));
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
goto out_free;
@@ -297,7 +298,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
ret = wl1271_plt_stop(wl);
break;
case PLT_ON:
- ret = wl1271_plt_start(wl, PLT_ON);
+ case PLT_CHIP_AWAKE:
+ ret = wl1271_plt_start(wl, val);
break;
case PLT_FEM_DETECT:
ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +363,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ u32 nla_cmd;
int err;
err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +373,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[WL1271_TM_ATTR_CMD_ID])
return -EINVAL;
- switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+ /* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */
+ if (wl->plt_mode == PLT_CHIP_AWAKE &&
+ nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+ return -EOPNOTSUPP;
+
+ switch (nla_cmd) {
case WL1271_TM_CMD_TEST:
return wl1271_tm_cmd_test(wl, tb);
case WL1271_TM_CMD_INTERROGATE:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7e93fe63a2c7..87cd707affa2 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (!ieee80211_is_auth(hdr->frame_control))
+ return;
+
/*
* add the station to the known list before transmitting the
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- hdr = (struct ieee80211_hdr *)(skb->data +
- sizeof(struct wl1271_tx_hw_descr));
- if (ieee80211_is_auth(hdr->frame_control))
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+ /*
+ * ROC for 1 second on the AP channel for completing the connection.
+ * Note the ROC will be continued by the update_sta_state callbacks
+ * once the station reaches the associated state.
+ */
+ wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+ wlvif->pending_auth_reply_time = jiffies;
+ cancel_delayed_work(&wlvif->pending_auth_complete_work);
+ ieee80211_queue_delayed_work(wl->hw,
+ &wlvif->pending_auth_complete_work,
+ msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+ if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
- wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
wl1271_tx_regulate_link(wl, wlvif, hlid);
}
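Taken together, the tx.c and main.c changes bound the auth-reply ROC in time. A comment-only sketch of the sequence (the 1000 ms value is WLCORE_PEND_AUTH_ROC_TIMEOUT from tx.h):

	/*
	 * t0: AP queues an auth reply for a not-yet-known station
	 *       -> wl1271_acx_set_inconnection_sta(hdr->addr1)
	 *       -> wlcore_update_inconn_sta(wl, wlvif, NULL, true)   (starts ROC)
	 *       -> wlvif->pending_auth_reply_time = jiffies
	 *       -> pending_auth_complete_work scheduled for t0 + 1000 ms
	 * t0 + 1000 ms: work runs and no newer auth reply was sent
	 *       -> wlcore_update_inconn_sta(wl, wlvif, NULL, false)  (cancels ROC)
	 * If the station associates before the timeout, the sta_state callbacks
	 * continue the ROC through the normal wl_sta/inconn_count path instead.
	 */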
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 55aa4acf9105..35489c300da1 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -56,6 +56,9 @@
/* Used for management frames and dummy packets */
#define WL1271_TID_MGMT 7
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000
+
struct wl127x_tx_mem {
/*
* Number of extra memory blocks to allocate for this packet
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0034979e97cb..06efc12a39e5 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -110,6 +110,7 @@ struct wlcore_ops {
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid);
+ u32 (*convert_hwaddr)(struct wl1271 *wl, u32 hwaddr);
bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk);
bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
@@ -290,6 +291,12 @@ struct wl1271 {
/* Number of valid bytes in the FW log buffer */
ssize_t fwlog_size;
+ /* FW log end marker */
+ u32 fwlog_end;
+
+ /* FW memory block size */
+ u32 fw_mem_block_size;
+
/* Sysfs FW log entry readers wait queue */
wait_queue_head_t fwlog_waitq;
@@ -307,6 +314,8 @@ struct wl1271 {
/* The mbox event mask */
u32 event_mask;
+ /* events to unmask only when ap interface is up */
+ u32 ap_event_mask;
/* Mailbox pointers */
u32 mbox_size;
@@ -481,6 +490,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf);
void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct wl1271_station *wl_sta, bool in_conn);
static inline void
wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e5e146435fe7..ce7261ce8b59 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
WLVIF_FLAG_CS_PROGRESS,
WLVIF_FLAG_AP_PROBE_RESP_SET,
WLVIF_FLAG_IN_USE,
+ WLVIF_FLAG_ACTIVE,
};
struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
PLT_OFF = 0,
PLT_ON = 1,
PLT_FEM_DETECT = 2,
+ PLT_CHIP_AWAKE = 3
};
struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
*/
int hw_queue_base;
+ /* do we have a pending auth reply? (and ROC) */
+ bool ap_pending_auth_reply;
+
+ /* time when we sent the pending auth reply */
+ unsigned long pending_auth_reply_time;
+
+ /* work for canceling ROC after pending auth reply */
+ struct delayed_work pending_auth_complete_work;
+
/*
* This struct must be last!
* data that has to be saved across reconfigs (e.g. recovery)
@@ -539,6 +550,4 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
#define HW_HT_RATES_OFFSET 16
#define HW_MIMO_RATES_OFFSET 24
-#define WL12XX_HW_BLOCK_SIZE 256
-
#endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 400fea1de080..08ae01b41c83 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -87,9 +87,13 @@ struct pending_tx_info {
struct xenvif_rx_meta {
int id;
int size;
+ int gso_type;
int gso_size;
};
+#define GSO_BIT(type) \
+ (1 << XEN_NETIF_GSO_TYPE_ ## type)
+
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
@@ -150,10 +154,12 @@ struct xenvif {
u8 fe_dev_addr[6];
/* Frontend feature information. */
+ int gso_mask;
+ int gso_prefix_mask;
+
u8 can_sg:1;
- u8 gso:1;
- u8 gso_prefix:1;
- u8 csum:1;
+ u8 ip_csum:1;
+ u8 ipv6_csum:1;
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
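The per-type masks replace the single gso/gso_prefix bits; a feature is queried by shifting the XEN_NETIF_GSO_TYPE_* value into the relevant mask. A minimal illustrative helper (hypothetical, mirroring the tests used in netback.c below):

	/* Illustrative only: true if the frontend negotiated this GSO type. */
	static bool xenvif_gso_type_supported(struct xenvif *vif, int gso_type)
	{
		return ((1 << gso_type) & vif->gso_mask) != 0;
	}

	/* e.g. xenvif_gso_type_supported(vif, XEN_NETIF_GSO_TYPE_TCPV6) is true
	 * once the frontend has written feature-gso-tcpv6 = 1 to xenstore. */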
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 459935a6bfae..b78ee10a956a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
if (!vif->can_sg)
features &= ~NETIF_F_SG;
- if (!vif->gso && !vif->gso_prefix)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
- if (!vif->csum)
+ if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+ features &= ~NETIF_F_TSO6;
+ if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
+ if (!vif->ipv6_csum)
+ features &= ~NETIF_F_IPV6_CSUM;
return features;
}
@@ -306,7 +310,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
- vif->csum = 1;
+ vif->ip_csum = 1;
vif->dev = dev;
vif->credit_bytes = vif->remaining_credit = ~0UL;
@@ -315,8 +319,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->credit_window_start = get_jiffies_64();
dev->netdev_ops = &xenvif_netdev_ops;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- dev->features = dev->hw_features;
+ dev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features = dev->hw_features | NETIF_F_RXCSUM;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 900da4b243ad..919b6509455c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
*/
-#define PKT_PROT_LEN (ETH_HLEN + \
- VLAN_HLEN + \
- sizeof(struct iphdr) + MAX_IPOPTLEN + \
- sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
- if (vif->can_sg || vif->gso || vif->gso_prefix)
+ if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
+ int gso_type;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ else
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+ if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
unsigned char *data;
int head = 1;
int old_meta_prod;
+ int gso_type;
+ int gso_size;
old_meta_prod = npo->meta_prod;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ gso_size = skb_shinfo(skb)->gso_size;
+ } else {
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ gso_size = 0;
+ }
+
/* Set up a GSO prefix descriptor, if necessary */
- if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+ if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- meta->gso_size = skb_shinfo(skb)->gso_size;
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
- if (!vif->gso_prefix)
- meta->gso_size = skb_shinfo(skb)->gso_size;
- else
+ if ((1 << gso_type) & vif->gso_mask) {
+ meta->gso_type = gso_type;
+ meta->gso_size = gso_size;
+ } else {
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
+ }
meta->size = 0;
meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif = netdev_priv(skb->dev);
- if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
vif->meta[npo.meta_cons].size,
flags);
- if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ if ((1 << vif->meta[npo.meta_cons].gso_type) &
+ vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
resp->flags |= XEN_NETRXF_extra_info;
+ gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
- gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return -EINVAL;
}
- /* Currently only TCPv4 S.O. is supported. */
- if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ switch (gso->u.gso.type) {
+ case XEN_NETIF_GSO_TYPE_TCPV4:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ break;
+ case XEN_NETIF_GSO_TYPE_TCPV6:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+ default:
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
xenvif_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+ /* If we need to pullup then pullup to the max, so we
+ * won't need to do it again.
+ */
+ int target = min_t(int, skb->len, MAX_TCP_HEADER);
+ __pskb_pull_tail(skb, target - skb_headlen(skb));
+ }
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
{
- struct iphdr *iph;
+ struct iphdr *iph = (void *)skb->data;
+ unsigned int header_size;
+ unsigned int off;
int err = -EPROTO;
- int recalculate_partial_csum = 0;
- /*
- * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
- * peers can fail to set NETRXF_csum_blank when sending a GSO
- * frame. In this case force the SKB to CHECKSUM_PARTIAL and
- * recalculate the partial checksum.
- */
- if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
- skb->ip_summed = CHECKSUM_PARTIAL;
- recalculate_partial_csum = 1;
- }
+ off = sizeof(struct iphdr);
- /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ header_size = skb->network_header + off + MAX_IPOPTLEN;
+ maybe_pull_tail(skb, header_size);
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
+ off = iph->ihl * 4;
- iph = (void *)skb->data;
switch (iph->protocol) {
case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+ if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - iph->ihl*4,
+ skb->len - off,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
@@ -1183,6 +1226,158 @@ out:
return err;
}
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+ int recalculate_partial_csum)
+{
+ int err = -EPROTO;
+ struct ipv6hdr *ipv6h = (void *)skb->data;
+ u8 nexthdr;
+ unsigned int header_size;
+ unsigned int off;
+	bool fragment = false;
+ bool done;
+
+ done = false;
+
+ off = sizeof(struct ipv6hdr);
+
+ header_size = skb->network_header + off;
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = ipv6h->nexthdr;
+
+ while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+ !done) {
+ switch (nexthdr) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING: {
+ struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ipv6_opt_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ break;
+ }
+ case IPPROTO_AH: {
+ struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct ip_auth_hdr);
+ maybe_pull_tail(skb, header_size);
+
+ nexthdr = hp->nexthdr;
+ off += (hp->hdrlen+2)<<2;
+ break;
+ }
+ case IPPROTO_FRAGMENT:
+ fragment = true;
+ /* fall through */
+ default:
+ done = true;
+ break;
+ }
+ }
+
+ if (!done) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Failed to parse packet header\n");
+ goto out;
+ }
+
+ if (fragment) {
+ if (net_ratelimit())
+ netdev_err(vif->dev, "Packet is a fragment!\n");
+ goto out;
+ }
+
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct tcphdr);
+ maybe_pull_tail(skb, header_size);
+
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
+ }
+ break;
+ case IPPROTO_UDP:
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check)))
+ goto out;
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ header_size = skb->network_header +
+ off +
+ sizeof(struct udphdr);
+ maybe_pull_tail(skb, header_size);
+
+ udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+ &ipv6h->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
+ }
+ break;
+ default:
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Attempting to checksum a non-TCP/UDP packet, "
+ "dropping a protocol %d packet\n",
+ nexthdr);
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+ int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ vif->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+ return err;
+}
+
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
u64 now = get_jiffies_64();
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
xenvif_fill_frags(vif, skb);
- /*
- * If the initial fragment was < PKT_PROT_LEN then
- * pull through some bytes from the other fragments to
- * increase the linear region to PKT_PROT_LEN bytes.
- */
- if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+ if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
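On the size chosen above: the fixed 128-byte PKT_PROT_LEN replaces the IPv4-only macro and is also what xenvif_tx_submit() pulls into the linear area. Illustrative arithmetic:

	/*
	 * Plain IPv4/TCP: 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 bytes
	 * Plain IPv6/TCP: 14 (Ethernet) + 40 (IPv6) + 20 (TCP) = 74 bytes
	 * Either case leaves 50+ bytes of headroom within the 128-byte pull
	 * for a VLAN tag and typical TCP options; deeper headers are pulled
	 * on demand by maybe_pull_tail() during checksum setup.
	 */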
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1b08d8798372..f0358992b04f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -105,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
/* We support rx-copy path. */
err = xenbus_printf(xbt, dev->nodename,
"feature-rx-copy", "%d", 1);
@@ -561,20 +577,50 @@ static int connect_rings(struct backend_info *be)
val = 0;
vif->can_sg = !!val;
+ vif->gso_mask = 0;
+ vif->gso_prefix_mask = 0;
+
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
"%d", &val) < 0)
val = 0;
- vif->gso = !!val;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV4);
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
"%d", &val) < 0)
val = 0;
- vif->gso_prefix = !!val;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_mask |= GSO_BIT(TCPV6);
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+ "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+ if (vif->gso_mask & vif->gso_prefix_mask) {
+ xenbus_dev_fatal(dev, err,
+ "%s: gso and gso prefix flags are not "
+ "mutually exclusive",
+ dev->otherend);
+ return -EOPNOTSUPP;
+ }
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
"%d", &val) < 0)
val = 0;
- vif->csum = !val;
+ vif->ip_csum = !val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+ "%d", &val) < 0)
+ val = 0;
+ vif->ipv6_csum = !!val;
/* Map the shared frame, irq etc. */
err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
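Summarizing the xenbus negotiation touched by this patch (comment sketch; keys and defaults as used above):

	/*
	 * Backend writes (netback_probe):
	 *   feature-gso-tcpv6         = <sg>   (advertised alongside SG support)
	 *   feature-ipv6-csum-offload = 1
	 * Frontend keys read (connect_rings):
	 *   feature-gso-tcpv4[-prefix]  -> TCPV4 bit in gso_mask / gso_prefix_mask
	 *   feature-gso-tcpv6[-prefix]  -> TCPV6 bit in gso_mask / gso_prefix_mask
	 *   feature-no-csum-offload     -> clears ip_csum
	 *   feature-ipv6-csum-offload   -> sets ipv6_csum
	 * Setting both the plain and -prefix variant of the same GSO type is
	 * rejected with -EOPNOTSUPP.
	 */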
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 36808bf25677..dd1011e55cb5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
u64_stats_update_end(&stats->syncp);
/* Pass it up. */
- netif_receive_skb(skb);
+ napi_gro_receive(&np->napi, skb);
}
return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
if (work_done < budget) {
int more_to_do = 0;
+ napi_gro_flush(napi, false);
+
local_irq_save(flags);
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);