diff options
Diffstat (limited to 'drivers/net')
497 files changed, 22410 insertions, 8916 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5abef8a3b775..3d56339a8a10 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1468,97 +1468,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev, return features; } -#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_GSO_ENCAP_ALL | \ - NETIF_F_HIGHDMA | NETIF_F_LRO) - -#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_GSO_PARTIAL) - -#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_GSO_SOFTWARE) - -#define BOND_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP) - - -static void bond_compute_features(struct bonding *bond) -{ - netdev_features_t gso_partial_features = BOND_GSO_PARTIAL_FEATURES; - unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | - IFF_XMIT_DST_RELEASE_PERM; - netdev_features_t vlan_features = BOND_VLAN_FEATURES; - netdev_features_t enc_features = BOND_ENC_FEATURES; -#ifdef CONFIG_XFRM_OFFLOAD - netdev_features_t xfrm_features = BOND_XFRM_FEATURES; -#endif /* CONFIG_XFRM_OFFLOAD */ - netdev_features_t mpls_features = BOND_MPLS_FEATURES; - struct net_device *bond_dev = bond->dev; - struct list_head *iter; - struct slave *slave; - unsigned short max_hard_header_len = ETH_HLEN; - unsigned int tso_max_size = TSO_MAX_SIZE; - u16 tso_max_segs = TSO_MAX_SEGS; - - if (!bond_has_slaves(bond)) - goto done; - - vlan_features = netdev_base_features(vlan_features); - mpls_features = netdev_base_features(mpls_features); - - bond_for_each_slave(bond, slave, iter) { - vlan_features = netdev_increment_features(vlan_features, - slave->dev->vlan_features, BOND_VLAN_FEATURES); - - enc_features = netdev_increment_features(enc_features, - slave->dev->hw_enc_features, - BOND_ENC_FEATURES); - -#ifdef CONFIG_XFRM_OFFLOAD - xfrm_features = netdev_increment_features(xfrm_features, - slave->dev->hw_enc_features, - BOND_XFRM_FEATURES); -#endif /* CONFIG_XFRM_OFFLOAD */ - - gso_partial_features = netdev_increment_features(gso_partial_features, - slave->dev->gso_partial_features, - BOND_GSO_PARTIAL_FEATURES); - - mpls_features = netdev_increment_features(mpls_features, - slave->dev->mpls_features, - BOND_MPLS_FEATURES); - - dst_release_flag &= slave->dev->priv_flags; - if (slave->dev->hard_header_len > max_hard_header_len) - max_hard_header_len = slave->dev->hard_header_len; - - tso_max_size = min(tso_max_size, slave->dev->tso_max_size); - tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs); - } - bond_dev->hard_header_len = max_hard_header_len; - -done: - bond_dev->gso_partial_features = gso_partial_features; - bond_dev->vlan_features = vlan_features; - bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; -#ifdef CONFIG_XFRM_OFFLOAD - bond_dev->hw_enc_features |= xfrm_features; -#endif /* CONFIG_XFRM_OFFLOAD */ - bond_dev->mpls_features = mpls_features; - netif_set_tso_max_segs(bond_dev, tso_max_segs); - netif_set_tso_max_size(bond_dev, tso_max_size); - - bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; - if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) && - dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) - bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE; - - netdev_change_features(bond_dev); -} - static void bond_setup_by_slave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -2273,7 +2182,7 @@ skip_mac_set: } 
bond->slave_cnt++; - bond_compute_features(bond); + netdev_compute_master_upper_features(bond->dev, true); bond_set_carrier(bond); /* Needs to be called before bond_select_active_slave(), which will @@ -2528,7 +2437,7 @@ static int __bond_release_one(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); } - bond_compute_features(bond); + netdev_compute_master_upper_features(bond->dev, true); if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && (old_features & NETIF_F_VLAN_CHALLENGED)) slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n"); @@ -4028,7 +3937,7 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_FEAT_CHANGE: if (!bond->notifier_ctx) { bond->notifier_ctx = true; - bond_compute_features(bond); + netdev_compute_master_upper_features(bond->dev, true); bond->notifier_ctx = false; } break; @@ -6011,7 +5920,7 @@ void bond_setup(struct net_device *bond_dev) * capable */ - bond_dev->hw_features = BOND_VLAN_FEATURES | + bond_dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_RX | diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 191707d7e3da..c2a3a4eef5b2 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -948,7 +948,6 @@ static const struct net_device_ops at91_netdev_ops = { .ndo_open = at91_open, .ndo_stop = at91_close, .ndo_start_xmit = at91_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops at91_ethtool_ops = { diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index 333ad42ea73b..baf494d20bef 100644 --- a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -227,7 +227,7 @@ static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) * mask mode with 32 bits width. */ - /* Enter filter initialization mode and assing filters to CAN + /* Enter filter initialization mode and assign filters to CAN * controllers. 
*/ regmap_update_bits(priv->gcan, BXCAN_FMR_REG, @@ -881,7 +881,6 @@ static const struct net_device_ops bxcan_netdev_ops = { .ndo_open = bxcan_open, .ndo_stop = bxcan_stop, .ndo_start_xmit = bxcan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops bxcan_ethtool_ops = { diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index cc371d0c9f3c..3702cac7fbf0 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -1362,7 +1362,6 @@ static const struct net_device_ops c_can_netdev_ops = { .ndo_open = c_can_open, .ndo_stop = c_can_close, .ndo_start_xmit = c_can_start_xmit, - .ndo_change_mtu = can_change_mtu, }; int register_c_can_dev(struct net_device *dev) diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c index 24af63961030..b66fc16aedd2 100644 --- a/drivers/net/can/can327.c +++ b/drivers/net/can/can327.c @@ -849,7 +849,6 @@ static const struct net_device_ops can327_netdev_ops = { .ndo_open = can327_netdev_open, .ndo_stop = can327_netdev_close, .ndo_start_xmit = can327_netdev_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops can327_ethtool_ops = { diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 30909f3aab57..8d5abd643c06 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -834,7 +834,6 @@ static const struct net_device_ops cc770_netdev_ops = { .ndo_open = cc770_open, .ndo_stop = cc770_close, .ndo_start_xmit = cc770_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops cc770_ethtool_ops = { diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index 8bd3f0fc385c..1e6b9e3dc2fe 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -1301,7 +1301,6 @@ static const struct net_device_ops ctucan_netdev_ops = { .ndo_open = ctucan_open, .ndo_stop = ctucan_close, .ndo_start_xmit = ctucan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ctucan_ethtool_ops = { diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 15ccedbb3f8d..80e1ab18de87 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -359,44 +359,6 @@ void can_set_default_mtu(struct net_device *dev) } } -/* changing MTU and control mode for CAN/CANFD devices */ -int can_change_mtu(struct net_device *dev, int new_mtu) -{ - struct can_priv *priv = netdev_priv(dev); - u32 ctrlmode_static = can_get_static_ctrlmode(priv); - - /* Do not allow changing the MTU while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* allow change of MTU according to the CANFD ability of the device */ - switch (new_mtu) { - case CAN_MTU: - /* 'CANFD-only' controllers can not switch to CAN_MTU */ - if (ctrlmode_static & CAN_CTRLMODE_FD) - return -EINVAL; - - priv->ctrlmode &= ~CAN_CTRLMODE_FD; - break; - - case CANFD_MTU: - /* check for potential CANFD ability */ - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && - !(ctrlmode_static & CAN_CTRLMODE_FD)) - return -EINVAL; - - priv->ctrlmode |= CAN_CTRLMODE_FD; - break; - - default: - return -EINVAL; - } - - WRITE_ONCE(dev->mtu, new_mtu); - return 0; -} -EXPORT_SYMBOL_GPL(can_change_mtu); - /* helper to define static CAN controller features at device creation time */ int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode) { @@ -417,34 +379,33 @@ int can_set_static_ctrlmode(struct net_device *dev, u32 
static_mode) } EXPORT_SYMBOL_GPL(can_set_static_ctrlmode); -/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices +/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices * supporting hardware timestamps */ -int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd) +int can_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) { - struct hwtstamp_config hwts_cfg = { 0 }; - - switch (cmd) { - case SIOCSHWTSTAMP: /* set */ - if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg))) - return -EFAULT; - if (hwts_cfg.tx_type == HWTSTAMP_TX_ON && - hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL) - return 0; - return -ERANGE; - - case SIOCGHWTSTAMP: /* get */ - hwts_cfg.tx_type = HWTSTAMP_TX_ON; - hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL; - if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg))) - return -EFAULT; - return 0; + cfg->tx_type = HWTSTAMP_TX_ON; + cfg->rx_filter = HWTSTAMP_FILTER_ALL; - default: - return -EOPNOTSUPP; - } + return 0; +} +EXPORT_SYMBOL(can_hwtstamp_get); + +/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices + * supporting hardware timestamps + */ +int can_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + if (cfg->tx_type == HWTSTAMP_TX_ON && + cfg->rx_filter == HWTSTAMP_FILTER_ALL) + return 0; + NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported"); + return -ERANGE; } -EXPORT_SYMBOL(can_eth_ioctl_hwts); +EXPORT_SYMBOL(can_hwtstamp_set); /* generic implementation of ethtool_ops::get_ts_info for CAN devices * supporting hardware timestamps diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c index 5d6d2828cd04..c826f00c551b 100644 --- a/drivers/net/can/esd/esd_402_pci-core.c +++ b/drivers/net/can/esd/esd_402_pci-core.c @@ -86,8 +86,8 @@ static const struct net_device_ops pci402_acc_netdev_ops = { .ndo_open = acc_open, .ndo_stop = acc_close, .ndo_start_xmit = acc_start_xmit, - .ndo_change_mtu = can_change_mtu, - .ndo_eth_ioctl = can_eth_ioctl_hwts, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, }; static const struct ethtool_ops pci402_acc_ethtool_ops = { diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 06d5d35fc1b5..f5d22c61503f 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -1867,7 +1867,6 @@ static const struct net_device_ops flexcan_netdev_ops = { .ndo_open = flexcan_open, .ndo_stop = flexcan_close, .ndo_start_xmit = flexcan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static int register_flexcandev(struct net_device *dev) diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index c5784d9779ef..3b1b09943436 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1561,7 +1561,6 @@ static const struct net_device_ops grcan_netdev_ops = { .ndo_open = grcan_open, .ndo_stop = grcan_close, .ndo_start_xmit = grcan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops grcan_ethtool_ops = { diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2eeee65f606f..0f83335e4d07 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -944,7 +944,6 @@ static const struct net_device_ops ifi_canfd_netdev_ops = { .ndo_open = ifi_canfd_open, .ndo_stop = ifi_canfd_close, 
.ndo_start_xmit = ifi_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ifi_canfd_ethtool_ops = { diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index bfa5cbe88017..1efdd1fd8caa 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1752,7 +1752,6 @@ static const struct net_device_ops ican3_netdev_ops = { .ndo_open = ican3_open, .ndo_stop = ican3_stop, .ndo_start_xmit = ican3_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ican3_ethtool_ops = { diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c index 0880023611be..d8c9bfb20230 100644 --- a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c +++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c @@ -902,9 +902,9 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) static const struct net_device_ops kvaser_pciefd_netdev_ops = { .ndo_open = kvaser_pciefd_open, .ndo_stop = kvaser_pciefd_stop, - .ndo_eth_ioctl = can_eth_ioctl_hwts, .ndo_start_xmit = kvaser_pciefd_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, }; static int kvaser_pciefd_set_phys_id(struct net_device *netdev, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index ad4f577c1ef7..eb856547ae7d 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -23,6 +23,7 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include "m_can.h" @@ -386,8 +387,8 @@ static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val size_t tries = 10; if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) { - dev_err(cdev->dev, - "refusing to configure device when in normal mode\n"); + netdev_err(cdev->net, + "refusing to configure device when in normal mode\n"); return -EBUSY; } @@ -451,7 +452,7 @@ static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts) { if (cdev->active_interrupts == interrupts) return; - cdev->ops->write_reg(cdev, M_CAN_IE, interrupts); + m_can_write(cdev, M_CAN_IE, interrupts); cdev->active_interrupts = interrupts; } @@ -469,7 +470,7 @@ static void m_can_coalescing_disable(struct m_can_classdev *cdev) static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev) { if (!cdev->net->irq) { - dev_dbg(cdev->dev, "Start hrtimer\n"); + netdev_dbg(cdev->net, "Start hrtimer\n"); hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), HRTIMER_MODE_REL_PINNED); @@ -485,7 +486,7 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev) m_can_write(cdev, M_CAN_ILE, 0x0); if (!cdev->net->irq) { - dev_dbg(cdev->dev, "Stop hrtimer\n"); + netdev_dbg(cdev->net, "Stop hrtimer\n"); hrtimer_try_to_cancel(&cdev->hrtimer); } } @@ -790,6 +791,10 @@ static int m_can_get_berr_counter(const struct net_device *dev, struct m_can_classdev *cdev = netdev_priv(dev); int err; + /* Avoid waking up the controller if the interface is down */ + if (!(dev->flags & IFF_UP)) + return 0; + err = m_can_clk_start(cdev); if (err) return err; @@ -1379,6 +1384,27 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = { .brp_inc = 1, }; +static int m_can_init_ram(struct m_can_classdev *cdev) +{ + int end, i, start; + int err = 0; + + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity 
checksum errors when reading an uninitialized buffer + */ + start = cdev->mcfg[MRAM_SIDF].off; + end = cdev->mcfg[MRAM_TXB].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + + for (i = start; i < end; i += 4) { + err = m_can_fifo_write_no_off(cdev, i, 0x0); + if (err) + break; + } + + return err; +} + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); @@ -1464,7 +1490,7 @@ static int m_can_chip_config(struct net_device *dev) err = m_can_init_ram(cdev); if (err) { - dev_err(cdev->dev, "Message RAM configuration failed\n"); + netdev_err(dev, "Message RAM configuration failed\n"); return err; } @@ -1694,7 +1720,7 @@ static int m_can_niso_supported(struct m_can_classdev *cdev) /* Then clear the it again. */ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0); if (ret) { - dev_err(cdev->dev, "failed to revert the NON-ISO bit in CCCR\n"); + netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n"); return ret; } @@ -1713,8 +1739,8 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) m_can_version = m_can_check_core_release(cdev); /* return if unsupported version */ if (!m_can_version) { - dev_err(cdev->dev, "Unsupported version number: %2d", - m_can_version); + netdev_err(cdev->net, "Unsupported version number: %2d", + m_can_version); return -EINVAL; } @@ -1772,8 +1798,8 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; break; default: - dev_err(cdev->dev, "Unsupported version number: %2d", - cdev->version); + netdev_err(cdev->net, "Unsupported version number: %2d", + cdev->version); return -EINVAL; } @@ -1827,6 +1853,7 @@ static int m_can_close(struct net_device *dev) close_candev(dev); + reset_control_assert(cdev->rst); m_can_clk_stop(cdev); phy_power_off(cdev->transceiver); @@ -1950,11 +1977,6 @@ out_fail: static void m_can_tx_submit(struct m_can_classdev *cdev) { - if (cdev->version == 30) - return; - if (!cdev->is_peripheral) - return; - m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit); cdev->tx_peripheral_submit = 0; } @@ -2035,7 +2057,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, return ret; } -static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) +static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer) { struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer); @@ -2069,11 +2091,15 @@ static int m_can_open(struct net_device *dev) if (err) goto out_phy_power_off; + err = reset_control_deassert(cdev->rst); + if (err) + goto exit_disable_clks; + /* open the can device */ err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); - goto exit_disable_clks; + goto out_reset_control_assert; } if (cdev->is_peripheral) @@ -2129,6 +2155,8 @@ out_wq_fail: else napi_disable(&cdev->napi); close_candev(dev); +out_reset_control_assert: + reset_control_assert(cdev->rst); exit_disable_clks: m_can_clk_stop(cdev); out_phy_power_off: @@ -2140,7 +2168,6 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static int m_can_get_coalesce(struct net_device *dev, @@ -2231,6 +2258,55 @@ static int m_can_set_coalesce(struct net_device *dev, return 0; } +static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + + wol->supported = device_can_wakeup(cdev->dev) ? 
WAKE_PHY : 0; + wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0; +} + +static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct m_can_classdev *cdev = netdev_priv(dev); + bool wol_enable = !!(wol->wolopts & WAKE_PHY); + int ret; + + if (wol->wolopts & ~WAKE_PHY) + return -EINVAL; + + if (wol_enable == device_may_wakeup(cdev->dev)) + return 0; + + ret = device_set_wakeup_enable(cdev->dev, wol_enable); + if (ret) { + netdev_err(cdev->net, "Failed to set wakeup enable %pE\n", + ERR_PTR(ret)); + return ret; + } + + if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) { + if (wol_enable) + ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup); + else + ret = pinctrl_pm_select_default_state(cdev->dev); + + if (ret) { + netdev_err(cdev->net, "Failed to select pinctrl state %pE\n", + ERR_PTR(ret)); + goto err_wakeup_enable; + } + } + + return 0; + +err_wakeup_enable: + /* Revert wakeup enable */ + device_set_wakeup_enable(cdev->dev, !wol_enable); + + return ret; +} + static const struct ethtool_ops m_can_ethtool_ops_coalescing = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | @@ -2240,10 +2316,14 @@ static const struct ethtool_ops m_can_ethtool_ops_coalescing = { .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = m_can_get_coalesce, .set_coalesce = m_can_set_coalesce, + .get_wol = m_can_get_wol, + .set_wol = m_can_set_wol, }; static const struct ethtool_ops m_can_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, + .get_wol = m_can_get_wol, + .set_wol = m_can_set_wol, }; static int register_m_can_dev(struct m_can_classdev *cdev) @@ -2267,8 +2347,8 @@ int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size) total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; if (total_size > mram_max_size) { - dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n", - total_size, mram_max_size); + netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n", + total_size, mram_max_size); return -EINVAL; } @@ -2303,39 +2383,17 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev, cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & FIELD_MAX(TXBC_NDTB_MASK); - dev_dbg(cdev->dev, - "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", - cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, - cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, - cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, - cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, - cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num, - cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, - cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); + netdev_dbg(cdev->net, + "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num, + cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num, + cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num, + cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num, + cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num, + cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num, + cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); } -int m_can_init_ram(struct m_can_classdev *cdev) -{ - int end, i, start; - int err = 0; - - /* initialize the entire Message RAM in use to avoid possible - * ECC/parity checksum errors when reading an uninitialized buffer - */ - start = 
cdev->mcfg[MRAM_SIDF].off; - end = cdev->mcfg[MRAM_TXB].off + - cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - - for (i = start; i < end; i += 4) { - err = m_can_fifo_write_no_off(cdev, i, 0x0); - if (err) - break; - } - - return err; -} -EXPORT_SYMBOL_GPL(m_can_init_ram); - int m_can_class_get_clocks(struct m_can_classdev *cdev) { int ret = 0; @@ -2344,7 +2402,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev) cdev->cclk = devm_clk_get(cdev->dev, "cclk"); if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) { - dev_err(cdev->dev, "no clock found\n"); + netdev_err(cdev->net, "no clock found\n"); ret = -ENODEV; } @@ -2352,6 +2410,42 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev) } EXPORT_SYMBOL_GPL(m_can_class_get_clocks); +static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev) +{ + return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup; +} + +static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev) +{ + struct device *dev = class_dev->dev; + int ret; + + class_dev->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR(class_dev->pinctrl)) { + ret = PTR_ERR(class_dev->pinctrl); + class_dev->pinctrl = NULL; + + if (ret == -ENODEV) + return 0; + + return dev_err_probe(dev, ret, "Failed to get pinctrl\n"); + } + + class_dev->pinctrl_state_wakeup = + pinctrl_lookup_state(class_dev->pinctrl, "wakeup"); + if (IS_ERR(class_dev->pinctrl_state_wakeup)) { + ret = PTR_ERR(class_dev->pinctrl_state_wakeup); + class_dev->pinctrl_state_wakeup = NULL; + + if (ret == -ENODEV) + return 0; + + return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n"); + } + + return 0; +} + struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv) { @@ -2367,9 +2461,12 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, sizeof(mram_config_vals) / 4); if (ret) { dev_err(dev, "Could not get Message RAM configuration."); - goto out; + return ERR_PTR(ret); } + if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source")) + device_set_wakeup_capable(dev, true); + /* Get TX FIFO size * Defines the total amount of echo buffers for loopback */ @@ -2379,7 +2476,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, net_dev = alloc_candev(sizeof_priv, tx_fifo_size); if (!net_dev) { dev_err(dev, "Failed to allocate CAN device"); - goto out; + return ERR_PTR(-ENOMEM); } class_dev = netdev_priv(net_dev); @@ -2389,8 +2486,16 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, m_can_of_parse_mram(class_dev, mram_config_vals); spin_lock_init(&class_dev->tx_handling_spinlock); -out: + + ret = m_can_class_parse_pinctrl(class_dev); + if (ret) + goto err_free_candev; + return class_dev; + +err_free_candev: + free_candev(net_dev); + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); @@ -2411,26 +2516,33 @@ int m_can_class_register(struct m_can_classdev *cdev) devm_kzalloc(cdev->dev, cdev->tx_fifo_size * sizeof(*cdev->tx_ops), GFP_KERNEL); - if (!cdev->tx_ops) { - dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n"); + if (!cdev->tx_ops) return -ENOMEM; - } } + cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL); + if (IS_ERR(cdev->rst)) + return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst), + "Failed to get reset line\n"); + ret = m_can_clk_start(cdev); if (ret) return ret; + ret = reset_control_deassert(cdev->rst); + if (ret) + goto clk_disable; + if (cdev->is_peripheral) { ret = can_rx_offload_add_manual(cdev->net, 
&cdev->offload, NAPI_POLL_WEIGHT); if (ret) - goto clk_disable; + goto out_reset_control_assert; } if (!cdev->net->irq) { - dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer"); - hrtimer_setup(&cdev->hrtimer, &hrtimer_callback, CLOCK_MONOTONIC, + netdev_dbg(cdev->net, "Polling enabled, initialize hrtimer"); + hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); } else { hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC, @@ -2443,19 +2555,21 @@ int m_can_class_register(struct m_can_classdev *cdev) ret = register_m_can_dev(cdev); if (ret) { - dev_err(cdev->dev, "registering %s failed (err=%d)\n", - cdev->net->name, ret); + netdev_err(cdev->net, "registering %s failed (err=%d)\n", + cdev->net->name, ret); goto rx_offload_del; } of_can_transceiver(cdev->net); - dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n", - KBUILD_MODNAME, cdev->net->irq, cdev->version); + netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n", + cdev->net->irq, cdev->version); /* Probe finished - * Stop clocks. They will be reactivated once the M_CAN device is opened + * Assert reset and stop clocks. + * They will be reactivated once the M_CAN device is opened */ + reset_control_assert(cdev->rst); m_can_clk_stop(cdev); return 0; @@ -2463,6 +2577,8 @@ int m_can_class_register(struct m_can_classdev *cdev) rx_offload_del: if (cdev->is_peripheral) can_rx_offload_del(&cdev->offload); +out_reset_control_assert: + reset_control_assert(cdev->rst); clk_disable: m_can_clk_stop(cdev); @@ -2506,7 +2622,8 @@ int m_can_class_suspend(struct device *dev) cdev->can.state = CAN_STATE_SLEEPING; } - pinctrl_pm_select_sleep_state(dev); + if (!m_can_class_wakeup_pinctrl_enabled(cdev)) + pinctrl_pm_select_sleep_state(dev); return ret; } @@ -2518,7 +2635,8 @@ int m_can_class_resume(struct device *dev) struct net_device *ndev = cdev->net; int ret = 0; - pinctrl_pm_select_default_state(dev); + if (!m_can_class_wakeup_pinctrl_enabled(cdev)) + pinctrl_pm_select_default_state(dev); if (netif_running(ndev)) { ret = m_can_clk_start(cdev); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index bd4746c63af3..4743342b2fba 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -86,6 +86,7 @@ struct m_can_classdev { struct device *dev; struct clk *hclk; struct clk *cclk; + struct reset_control *rst; struct workqueue_struct *tx_wq; struct phy *transceiver; @@ -128,6 +129,9 @@ struct m_can_classdev { struct mram_cfg mcfg[MRAM_CFG_NUM]; struct hrtimer hrtimer; + + struct pinctrl *pinctrl; + struct pinctrl_state *pinctrl_state_wakeup; }; struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv); @@ -135,7 +139,6 @@ void m_can_class_free_dev(struct net_device *net); int m_can_class_register(struct m_can_classdev *cdev); void m_can_class_unregister(struct m_can_classdev *cdev); int m_can_class_get_clocks(struct m_can_classdev *cdev); -int m_can_init_ram(struct m_can_classdev *priv); int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size); int m_can_class_suspend(struct device *dev); diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 9ad7419f88f8..eb31ed1f9644 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -111,8 +111,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) mcan_class = m_can_class_allocate_dev(&pci->dev, sizeof(struct m_can_pci_priv)); - if 
(!mcan_class) - return -ENOMEM; + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); priv = cdev_to_priv(mcan_class); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 4a412add2b8d..56da411878af 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -87,8 +87,8 @@ static int m_can_plat_probe(struct platform_device *pdev) mcan_class = m_can_class_allocate_dev(&pdev->dev, sizeof(struct m_can_plat_priv)); - if (!mcan_class) - return -ENOMEM; + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); priv = cdev_to_priv(mcan_class); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 39b0b5277b11..31cc9d0abd45 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -416,8 +416,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi) mcan_class = m_can_class_allocate_dev(&spi->dev, sizeof(struct tcan4x5x_priv)); - if (!mcan_class) - return -ENOMEM; + if (IS_ERR(mcan_class)) + return PTR_ERR(mcan_class); ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE); if (ret) diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 8c2a7bc64d3d..39c7aa2a0b2f 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -607,7 +607,6 @@ static const struct net_device_ops mscan_netdev_ops = { .ndo_open = mscan_open, .ndo_stop = mscan_close, .ndo_start_xmit = mscan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mscan_ethtool_ops = { diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index b5bc80ac7876..06cb2629f66a 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -743,37 +743,33 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int peak_eth_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config hwts_cfg = { 0 }; + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_ALL; - switch (cmd) { - case SIOCSHWTSTAMP: /* set */ - if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg))) - return -EFAULT; - if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF && - hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL) - return 0; - return -ERANGE; + return 0; +} - case SIOCGHWTSTAMP: /* get */ - hwts_cfg.tx_type = HWTSTAMP_TX_OFF; - hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL; - if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg))) - return -EFAULT; +static int peak_eth_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_ALL) return 0; - default: - return -EOPNOTSUPP; - } + NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported"); + return -ERANGE; } static const struct net_device_ops peak_canfd_netdev_ops = { .ndo_open = peak_canfd_open, .ndo_stop = peak_canfd_close, - .ndo_eth_ioctl = peak_eth_ioctl, .ndo_start_xmit = peak_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = peak_eth_hwtstamp_get, + .ndo_hwtstamp_set = peak_eth_hwtstamp_set, }; static int peak_get_ts_info(struct net_device *dev, diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 
5f85f4e27205..fc3df328e877 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -635,7 +635,6 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops rcar_can_ethtool_ops = { diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 45d36adb51b7..49ab65274b51 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1818,7 +1818,6 @@ static const struct net_device_ops rcar_canfd_netdev_ops = { .ndo_open = rcar_canfd_open, .ndo_stop = rcar_canfd_close, .ndo_start_xmit = rcar_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops rcar_canfd_ethtool_ops = { diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index 046f0a0ae4d4..29de0c01e4ed 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -761,7 +761,6 @@ static const struct net_device_ops rkcanfd_netdev_ops = { .ndo_open = rkcanfd_open, .ndo_stop = rkcanfd_stop, .ndo_start_xmit = rkcanfd_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev) diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 4d245857ef1c..acfa49db3907 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -697,7 +697,6 @@ static const struct net_device_ops sja1000_netdev_ops = { .ndo_open = sja1000_open, .ndo_stop = sja1000_close, .ndo_start_xmit = sja1000_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops sja1000_ethtool_ops = { diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 58ff2ec1d975..cd789e178d34 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -774,7 +774,6 @@ static const struct net_device_ops slcan_netdev_ops = { .ndo_open = slcan_netdev_open, .ndo_stop = slcan_netdev_close, .ndo_start_xmit = slcan_netdev_xmit, - .ndo_change_mtu = can_change_mtu, }; /****************************************** diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 278ee8722770..79bc64395ac4 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -609,7 +609,6 @@ static const struct net_device_ops softing_netdev_ops = { .ndo_open = softing_netdev_open, .ndo_stop = softing_netdev_stop, .ndo_start_xmit = softing_netdev_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops softing_ethtool_ops = { diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 6d4b643e135f..e00d3dbc4cf4 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -799,7 +799,6 @@ static const struct net_device_ops hi3110_netdev_ops = { .ndo_open = hi3110_open, .ndo_stop = hi3110_stop, .ndo_start_xmit = hi3110_hard_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops hi3110_ethtool_ops = { diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index b797e08499d7..fa97adf25b73 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1270,7 +1270,6 @@ static const struct net_device_ops mcp251x_netdev_ops = { 
.ndo_open = mcp251x_open, .ndo_stop = mcp251x_stop, .ndo_start_xmit = mcp251x_hard_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mcp251x_ethtool_ops = { @@ -1321,7 +1320,7 @@ static int mcp251x_can_probe(struct spi_device *spi) clk = devm_clk_get_optional(&spi->dev, NULL); if (IS_ERR(clk)) - return PTR_ERR(clk); + return dev_err_probe(&spi->dev, PTR_ERR(clk), "Cannot get clock\n"); freq = clk_get_rate(clk); if (freq == 0) @@ -1329,7 +1328,7 @@ static int mcp251x_can_probe(struct spi_device *spi) /* Sanity check */ if (freq < 1000000 || freq > 25000000) - return -ERANGE; + return dev_err_probe(&spi->dev, -ERANGE, "clock frequency out of range\n"); /* Allocate can/net device */ net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); @@ -1337,8 +1336,10 @@ static int mcp251x_can_probe(struct spi_device *spi) return -ENOMEM; ret = clk_prepare_enable(clk); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot enable clock\n"); goto out_free; + } net->netdev_ops = &mcp251x_netdev_ops; net->ethtool_ops = &mcp251x_ethtool_ops; @@ -1363,20 +1364,25 @@ static int mcp251x_can_probe(struct spi_device *spi) else spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000; ret = spi_setup(spi); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot set up spi\n"); goto out_clk; + } priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; + dev_err_probe(&spi->dev, ret, "supply deferred\n"); goto out_clk; } ret = mcp251x_power_enable(priv->power, 1); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot enable power\n"); goto out_clk; + } priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU, @@ -1410,21 +1416,24 @@ static int mcp251x_can_probe(struct spi_device *spi) /* Here is OK to not lock the MCP, no one knows about it yet */ ret = mcp251x_hw_probe(spi); if (ret) { - if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", - priv->model); + dev_err_probe(&spi->dev, ret, "Cannot initialize MCP%x. Wrong wiring?\n", + priv->model); goto error_probe; } mcp251x_hw_sleep(spi); ret = register_candev(net); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot register CAN device\n"); goto error_probe; + } ret = mcp251x_gpio_setup(priv); - if (ret) + if (ret) { + dev_err_probe(&spi->dev, ret, "Cannot set up gpios\n"); goto out_unregister_candev; + } netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; @@ -1443,7 +1452,6 @@ out_clk: out_free: free_candev(net); - dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); return ret; } diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig index 877e4356010d..7c29846e6051 100644 --- a/drivers/net/can/spi/mcp251xfd/Kconfig +++ b/drivers/net/can/spi/mcp251xfd/Kconfig @@ -5,6 +5,7 @@ config CAN_MCP251XFD select CAN_RX_OFFLOAD select REGMAP select WANT_DEV_COREDUMP + select GPIOLIB help Driver for the Microchip MCP251XFD SPI FD-CAN controller family. 
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 7450ea42c1ea..5134ebb85880 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -608,23 +608,21 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv) { - u32 val; + u32 val, mask; if (!priv->rx_int) return 0; - /* Configure GPIOs: - * - PIN0: GPIO Input - * - PIN1: GPIO Input/RX Interrupt + /* Configure PIN1 as RX Interrupt: * * PIN1 must be Input, otherwise there is a glitch on the * rx-INT line. It happens between setting the PIN as output * (in the first byte of the SPI transfer) and configuring the * PIN as interrupt (in the last byte of the SPI transfer). */ - val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 | - MCP251XFD_REG_IOCON_TRIS0; - return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); + val = MCP251XFD_REG_IOCON_TRIS(1); + mask = MCP251XFD_REG_IOCON_TRIS(1) | MCP251XFD_REG_IOCON_PM(1); + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, mask, val); } static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) @@ -634,13 +632,9 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) if (!priv->rx_int) return 0; - /* Configure GPIOs: - * - PIN0: GPIO Input - * - PIN1: GPIO Input - */ - val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 | - MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0; - return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); + /* Configure PIN1 as GPIO Input */ + val = MCP251XFD_REG_IOCON_PM(1) | MCP251XFD_REG_IOCON_TRIS(1); + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val, val); } static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) @@ -767,21 +761,13 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, mcp251xfd_chip_interrupts_disable(priv); mcp251xfd_chip_rx_int_disable(priv); mcp251xfd_timestamp_stop(priv); - mcp251xfd_chip_sleep(priv); + mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); } static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) { int err; - err = mcp251xfd_chip_softreset(priv); - if (err) - goto out_chip_stop; - - err = mcp251xfd_chip_clock_init(priv); - if (err) - goto out_chip_stop; - err = mcp251xfd_chip_timestamp_init(priv); if (err) goto out_chip_stop; @@ -1625,8 +1611,11 @@ static int mcp251xfd_open(struct net_device *ndev) return err; err = pm_runtime_resume_and_get(ndev->dev.parent); - if (err) + if (err) { + if (err == -ETIMEDOUT || err == -ENODEV) + pm_runtime_set_suspended(ndev->dev.parent); goto out_close_candev; + } err = mcp251xfd_ring_alloc(priv); if (err) @@ -1714,8 +1703,8 @@ static const struct net_device_ops mcp251xfd_netdev_ops = { .ndo_open = mcp251xfd_open, .ndo_stop = mcp251xfd_stop, .ndo_start_xmit = mcp251xfd_start_xmit, - .ndo_eth_ioctl = can_eth_ioctl_hwts, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, }; static void @@ -1808,6 +1797,160 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) return 0; } +static const char * const mcp251xfd_gpio_names[] = { "GPIO0", "GPIO1" }; + +static int mcp251xfd_gpio_request(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 pin_mask = MCP251XFD_REG_IOCON_PM(offset); + int ret; + + if (priv->rx_int && offset == 1) { + 
netdev_err(priv->ndev, "Can't use GPIO 1 with RX-INT!\n"); + return -EINVAL; + } + + ret = pm_runtime_resume_and_get(priv->ndev->dev.parent); + if (ret) + return ret; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, pin_mask, pin_mask); +} + +static void mcp251xfd_gpio_free(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + + pm_runtime_put(priv->ndev->dev.parent); +} + +static int mcp251xfd_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 mask = MCP251XFD_REG_IOCON_TRIS(offset); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + if (mask & val) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; +} + +static int mcp251xfd_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 mask = MCP251XFD_REG_IOCON_GPIO(offset); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + return !!(mask & val); +} + +static int mcp251xfd_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bit) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val); + if (ret) + return ret; + + *bit = FIELD_GET(MCP251XFD_REG_IOCON_GPIO_MASK, val) & *mask; + + return 0; +} + +static int mcp251xfd_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset); + u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset); + u32 val; + + if (value) + val = val_mask; + else + val = 0; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, + dir_mask | val_mask, val); +} + +static int mcp251xfd_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset); + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, dir_mask, dir_mask); +} + +static int mcp251xfd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset); + u32 val; + + if (value) + val = val_mask; + else + val = 0; + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val_mask, val); +} + +static int mcp251xfd_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) +{ + struct mcp251xfd_priv *priv = gpiochip_get_data(chip); + u32 val; + + val = FIELD_PREP(MCP251XFD_REG_IOCON_LAT_MASK, *bits); + + return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, + MCP251XFD_REG_IOCON_LAT_MASK, val); +} + +static int mcp251fdx_gpio_setup(struct mcp251xfd_priv *priv) +{ + struct gpio_chip *gc = &priv->gc; + + if (!device_property_present(&priv->spi->dev, "gpio-controller")) + return 0; + + gc->label = dev_name(&priv->spi->dev); + gc->parent = &priv->spi->dev; + gc->owner = THIS_MODULE; + gc->request = mcp251xfd_gpio_request; + gc->free = mcp251xfd_gpio_free; + gc->get_direction = mcp251xfd_gpio_get_direction; + gc->direction_output = mcp251xfd_gpio_direction_output; + gc->direction_input = mcp251xfd_gpio_direction_input; + gc->get = mcp251xfd_gpio_get; + gc->get_multiple = 
mcp251xfd_gpio_get_multiple; + gc->set = mcp251xfd_gpio_set; + gc->set_multiple = mcp251xfd_gpio_set_multiple; + gc->base = -1; + gc->can_sleep = true; + gc->ngpio = ARRAY_SIZE(mcp251xfd_gpio_names); + gc->names = mcp251xfd_gpio_names; + + return devm_gpiochip_add_data(&priv->spi->dev, gc, priv); +} + static int mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, u32 *effective_speed_hz_slow, @@ -1907,53 +2050,59 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) struct net_device *ndev = priv->ndev; int err; + mcp251xfd_register_quirks(priv); + err = mcp251xfd_clks_and_vdd_enable(priv); if (err) return err; - pm_runtime_get_noresume(ndev->dev.parent); - err = pm_runtime_set_active(ndev->dev.parent); - if (err) - goto out_runtime_put_noidle; - pm_runtime_enable(ndev->dev.parent); - - mcp251xfd_register_quirks(priv); - err = mcp251xfd_chip_softreset(priv); if (err == -ENODEV) - goto out_runtime_disable; + goto out_clks_and_vdd_disable; if (err) goto out_chip_sleep; err = mcp251xfd_chip_clock_init(priv); if (err == -ENODEV) - goto out_runtime_disable; + goto out_clks_and_vdd_disable; if (err) goto out_chip_sleep; + pm_runtime_get_noresume(ndev->dev.parent); + err = pm_runtime_set_active(ndev->dev.parent); + if (err) + goto out_runtime_put_noidle; + pm_runtime_enable(ndev->dev.parent); + err = mcp251xfd_register_chip_detect(priv); if (err) - goto out_chip_sleep; + goto out_runtime_disable; err = mcp251xfd_register_check_rx_int(priv); if (err) - goto out_chip_sleep; + goto out_runtime_disable; mcp251xfd_ethtool_init(priv); + err = mcp251fdx_gpio_setup(priv); + if (err) { + dev_err_probe(&priv->spi->dev, err, "Failed to register gpio-controller.\n"); + goto out_runtime_disable; + } + err = register_candev(ndev); if (err) - goto out_chip_sleep; + goto out_runtime_disable; err = mcp251xfd_register_done(priv); if (err) goto out_unregister_candev; - /* Put controller into sleep mode and let pm_runtime_put() - * disable the clocks and vdd. If CONFIG_PM is not enabled, - * the clocks and vdd will stay powered. + /* Put controller into Config mode and let pm_runtime_put() + * put in sleep mode, disable the clocks and vdd. If CONFIG_PM + * is not enabled, the clocks and vdd will stay powered. 
*/ - err = mcp251xfd_chip_sleep(priv); + err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); if (err) goto out_unregister_candev; @@ -1963,12 +2112,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) out_unregister_candev: unregister_candev(ndev); -out_chip_sleep: - mcp251xfd_chip_sleep(priv); out_runtime_disable: pm_runtime_disable(ndev->dev.parent); out_runtime_put_noidle: pm_runtime_put_noidle(ndev->dev.parent); +out_chip_sleep: + mcp251xfd_chip_sleep(priv); +out_clks_and_vdd_disable: mcp251xfd_clks_and_vdd_disable(priv); return err; @@ -1980,10 +2130,12 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) unregister_candev(ndev); - if (pm_runtime_enabled(ndev->dev.parent)) + if (pm_runtime_enabled(ndev->dev.parent)) { pm_runtime_disable(ndev->dev.parent); - else + } else { + mcp251xfd_chip_sleep(priv); mcp251xfd_clks_and_vdd_disable(priv); + } } static const struct of_device_id mcp251xfd_of_match[] = { @@ -2206,16 +2358,41 @@ static void mcp251xfd_remove(struct spi_device *spi) static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) { - const struct mcp251xfd_priv *priv = dev_get_drvdata(device); + struct mcp251xfd_priv *priv = dev_get_drvdata(device); + mcp251xfd_chip_sleep(priv); return mcp251xfd_clks_and_vdd_disable(priv); } static int __maybe_unused mcp251xfd_runtime_resume(struct device *device) { - const struct mcp251xfd_priv *priv = dev_get_drvdata(device); + struct mcp251xfd_priv *priv = dev_get_drvdata(device); + int err; + + err = mcp251xfd_clks_and_vdd_enable(priv); + if (err) + return err; - return mcp251xfd_clks_and_vdd_enable(priv); + err = mcp251xfd_chip_softreset(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + err = mcp251xfd_chip_clock_init(priv); + if (err == -ENODEV) + goto out_clks_and_vdd_disable; + if (err) + goto out_chip_sleep; + + return 0; + +out_chip_sleep: + mcp251xfd_chip_sleep(priv); +out_clks_and_vdd_disable: + mcp251xfd_clks_and_vdd_disable(priv); + + return err; } static const struct dev_pm_ops mcp251xfd_pm_ops = { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 8c5be8d1c519..70d5ff0ae7ac 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -13,17 +13,9 @@ static const struct regmap_config mcp251xfd_regmap_crc; static int -mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count) -{ - struct spi_device *spi = context; - - return spi_write(spi, data, count); -} - -static int -mcp251xfd_regmap_nocrc_gather_write(void *context, - const void *reg, size_t reg_len, - const void *val, size_t val_len) +_mcp251xfd_regmap_nocrc_gather_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); @@ -47,6 +39,54 @@ mcp251xfd_regmap_nocrc_gather_write(void *context, return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); } +static int +mcp251xfd_regmap_nocrc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t val_len) +{ + const u16 byte_exclude = MCP251XFD_REG_IOCON + + mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK); + u16 reg = be16_to_cpu(*(__be16 *)reg_p) & MCP251XFD_SPI_ADDRESS_MASK; + int ret; + + /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1 + * + * According to MCP2518FD Errata DS80000789E 5 
writing IOCON register using one + * SPI write command clears LAT0/LAT1. + * + * Errata Fix/Work Around suggests to write registers with single byte + * write instructions. However, it seems that the byte at 0xe06(IOCON[23:16]) + * is for read-only access and writing to it causes the clearing of LAT0/LAT1. + */ + if (reg <= byte_exclude && reg + val_len > byte_exclude) { + size_t len = byte_exclude - reg; + + /* Write up to 0xe05 */ + ret = _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, val, len); + if (ret) + return ret; + + /* Write from 0xe07 on */ + reg += len + 1; + reg = (__force unsigned short)cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | reg); + return _mcp251xfd_regmap_nocrc_gather_write(context, ®, reg_len, + val + len + 1, + val_len - len - 1); + } + + return _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, + val, val_len); +} + +static int +mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count) +{ + const size_t data_offset = sizeof(__be16); + + return mcp251xfd_regmap_nocrc_gather_write(context, data, data_offset, + data + data_offset, count - data_offset); +} + static inline bool mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv, unsigned int reg) @@ -64,6 +104,7 @@ mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv, case MCP251XFD_REG_CON: case MCP251XFD_REG_OSC: case MCP251XFD_REG_ECCCON: + case MCP251XFD_REG_IOCON: return true; default: mcp251xfd_for_each_rx_ring(priv, ring, n) { @@ -139,10 +180,9 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg, tmp_le32 = orig_le32 & ~mask_le32; tmp_le32 |= val_le32 & mask_le32; - mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte); - memcpy(buf_tx->data, &tmp_le32, len); - - return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len); + reg += first_byte; + mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg); + return mcp251xfd_regmap_nocrc_gather_write(context, &buf_tx->cmd, 2, &tmp_le32, len); } static int @@ -196,9 +236,9 @@ mcp251xfd_regmap_nocrc_read(void *context, } static int -mcp251xfd_regmap_crc_gather_write(void *context, - const void *reg_p, size_t reg_len, - const void *val, size_t val_len) +_mcp251xfd_regmap_crc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t val_len) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); @@ -230,6 +270,44 @@ mcp251xfd_regmap_crc_gather_write(void *context, } static int +mcp251xfd_regmap_crc_gather_write(void *context, + const void *reg_p, size_t reg_len, + const void *val, size_t val_len) +{ + const u16 byte_exclude = MCP251XFD_REG_IOCON + + mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK); + u16 reg = *(u16 *)reg_p; + int ret; + + /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1 + * + * According to MCP2518FD Errata DS80000789E 5 writing IOCON register using one + * SPI write command clears LAT0/LAT1. + * + * Errata Fix/Work Around suggests to write registers with single byte + * write instructions. However, it seems that the byte at 0xe06(IOCON[23:16]) + * is for read-only access and writing to it causes the clearing of LAT0/LAT1. 
+ */ + if (reg <= byte_exclude && reg + val_len > byte_exclude) { + size_t len = byte_exclude - reg; + + /* Write up to 0xe05 */ + ret = _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len, val, len); + if (ret) + return ret; + + /* Write from 0xe07 on */ + reg += len + 1; + return _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len, + val + len + 1, + val_len - len - 1); + } + + return _mcp251xfd_regmap_crc_gather_write(context, reg_p, reg_len, + val, val_len); +} + +static int mcp251xfd_regmap_crc_write(void *context, const void *data, size_t count) { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index dcbbd2b2fae8..085d7101e595 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -15,6 +15,7 @@ #include <linux/can/dev.h> #include <linux/can/rx-offload.h> #include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/regmap.h> @@ -335,13 +336,19 @@ #define MCP251XFD_REG_IOCON_TXCANOD BIT(28) #define MCP251XFD_REG_IOCON_PM1 BIT(25) #define MCP251XFD_REG_IOCON_PM0 BIT(24) +#define MCP251XFD_REG_IOCON_PM(n) (MCP251XFD_REG_IOCON_PM0 << (n)) #define MCP251XFD_REG_IOCON_GPIO1 BIT(17) #define MCP251XFD_REG_IOCON_GPIO0 BIT(16) +#define MCP251XFD_REG_IOCON_GPIO(n) (MCP251XFD_REG_IOCON_GPIO0 << (n)) +#define MCP251XFD_REG_IOCON_GPIO_MASK GENMASK(17, 16) #define MCP251XFD_REG_IOCON_LAT1 BIT(9) #define MCP251XFD_REG_IOCON_LAT0 BIT(8) +#define MCP251XFD_REG_IOCON_LAT(n) (MCP251XFD_REG_IOCON_LAT0 << (n)) +#define MCP251XFD_REG_IOCON_LAT_MASK GENMASK(9, 8) #define MCP251XFD_REG_IOCON_XSTBYEN BIT(6) #define MCP251XFD_REG_IOCON_TRIS1 BIT(1) #define MCP251XFD_REG_IOCON_TRIS0 BIT(0) +#define MCP251XFD_REG_IOCON_TRIS(n) (MCP251XFD_REG_IOCON_TRIS0 << (n)) #define MCP251XFD_REG_CRC 0xe08 #define MCP251XFD_REG_CRC_FERRIE BIT(25) @@ -670,6 +677,7 @@ struct mcp251xfd_priv { struct mcp251xfd_devtype_data devtype_data; struct can_berr_counter bec; + struct gpio_chip gc; }; #define MCP251XFD_IS(_model) \ diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 53bfd873de9b..6fcb301ef611 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -768,7 +768,6 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_open = sun4ican_open, .ndo_stop = sun4ican_close, .ndo_start_xmit = sun4ican_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops sun4ican_ethtool_ops = { diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index e6d6661a908a..1d3dbf28b105 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -829,7 +829,6 @@ static const struct net_device_ops ti_hecc_netdev_ops = { .ndo_open = ti_hecc_open, .ndo_stop = ti_hecc_close, .ndo_start_xmit = ti_hecc_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ti_hecc_ethtool_ops = { diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5355bac4dccb..de8e212a1366 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -885,7 +885,6 @@ static const struct net_device_ops ems_usb_netdev_ops = { .ndo_open = ems_usb_open, .ndo_stop = ems_usb_close, .ndo_start_xmit = ems_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ems_usb_ethtool_ops = { diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 9bc1824d7be6..08da507faef4 100644 ---
a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -1011,7 +1011,6 @@ static const struct net_device_ops esd_usb_netdev_ops = { .ndo_open = esd_usb_open, .ndo_stop = esd_usb_close, .ndo_start_xmit = esd_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops esd_usb_ethtool_ops = { diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index adc91873c083..f799233c2b72 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -1976,8 +1976,8 @@ static const struct net_device_ops es58x_netdev_ops = { .ndo_open = es58x_open, .ndo_stop = es58x_stop, .ndo_start_xmit = es58x_start_xmit, - .ndo_eth_ioctl = can_eth_ioctl_hwts, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, }; static const struct ethtool_ops es58x_ethtool_ops = { diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c index e0cfa1460b0b..efe61ece79ea 100644 --- a/drivers/net/can/usb/f81604.c +++ b/drivers/net/can/usb/f81604.c @@ -1052,7 +1052,6 @@ static const struct net_device_ops f81604_netdev_ops = { .ndo_open = f81604_open, .ndo_stop = f81604_close, .ndo_start_xmit = f81604_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct can_bittiming_const f81604_bittiming_const = { diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 69b8d6da651b..1321eb5e89ae 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -1087,12 +1087,25 @@ static int gs_can_close(struct net_device *netdev) return 0; } -static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int gs_can_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) { const struct gs_can *dev = netdev_priv(netdev); if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) - return can_eth_ioctl_hwts(netdev, ifr, cmd); + return can_hwtstamp_get(netdev, cfg); + + return -EOPNOTSUPP; +} + +static int gs_can_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + const struct gs_can *dev = netdev_priv(netdev); + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + return can_hwtstamp_set(netdev, cfg, extack); return -EOPNOTSUPP; } @@ -1101,8 +1114,8 @@ static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, - .ndo_change_mtu = can_change_mtu, - .ndo_eth_ioctl = gs_can_eth_ioctl, + .ndo_hwtstamp_get = gs_can_hwtstamp_get, + .ndo_hwtstamp_set = gs_can_hwtstamp_set, }; static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 90e77fa0ff4a..62701ec34272 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -784,9 +784,9 @@ static int kvaser_usb_set_phys_id(struct net_device *netdev, static const struct net_device_ops kvaser_usb_netdev_ops = { .ndo_open = kvaser_usb_open, .ndo_stop = kvaser_usb_close, - .ndo_eth_ioctl = can_eth_ioctl_hwts, .ndo_start_xmit = kvaser_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = can_hwtstamp_get, + .ndo_hwtstamp_set = can_hwtstamp_set, }; static const struct ethtool_ops kvaser_usb_ethtool_ops = { diff --git a/drivers/net/can/usb/mcba_usb.c 
b/drivers/net/can/usb/mcba_usb.c index 1f9b915094e6..41c0a1c399bf 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -761,7 +761,6 @@ static const struct net_device_ops mcba_netdev_ops = { .ndo_open = mcba_usb_open, .ndo_stop = mcba_usb_close, .ndo_start_xmit = mcba_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mcba_ethtool_ops = { diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c index 8deff16491a1..dd6df2ec3742 100644 --- a/drivers/net/can/usb/nct6694_canfd.c +++ b/drivers/net/can/usb/nct6694_canfd.c @@ -690,7 +690,6 @@ static const struct net_device_ops nct6694_canfd_netdev_ops = { .ndo_open = nct6694_canfd_open, .ndo_stop = nct6694_canfd_close, .ndo_start_xmit = nct6694_canfd_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops nct6694_canfd_ethtool_ops = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index c74302ca7cee..cf48bb26d46d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -784,37 +784,33 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev) return 0; } -static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int peak_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config hwts_cfg = { 0 }; - - switch (cmd) { - case SIOCSHWTSTAMP: /* set */ - if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg))) - return -EFAULT; - if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF && - hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL) - return 0; - return -ERANGE; - - case SIOCGHWTSTAMP: /* get */ - hwts_cfg.tx_type = HWTSTAMP_TX_OFF; - hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL; - if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg))) - return -EFAULT; + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_ALL; + + return 0; +} + +static int peak_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_ALL) return 0; - default: - return -EOPNOTSUPP; - } + NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported"); + return -ERANGE; } static const struct net_device_ops peak_usb_netdev_ops = { .ndo_open = peak_usb_ndo_open, .ndo_stop = peak_usb_ndo_stop, - .ndo_eth_ioctl = peak_eth_ioctl, .ndo_start_xmit = peak_usb_ndo_start_xmit, - .ndo_change_mtu = can_change_mtu, + .ndo_hwtstamp_get = peak_hwtstamp_get, + .ndo_hwtstamp_set = peak_hwtstamp_set, }; /* CAN-USB devices generally handle 32-bit CAN channel IDs. 
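The ioctl-to-ndo conversions above do not change the userspace ABI: the same SIOCSHWTSTAMP ioctl still configures timestamping, and the core now unpacks it into a struct kernel_hwtstamp_config before calling the driver's .ndo_hwtstamp_set() callback, so the copy_from_user()/copy_to_user() boilerplate disappears from the drivers. A minimal userspace sketch (function and interface name are hypothetical):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask for RX timestamps on all frames and no TX timestamps - the
 * RX-all/TX-off configuration used throughout the drivers above. */
static int enable_rx_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}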
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 07406daf7c88..de61d9da99e3 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -1233,7 +1233,6 @@ static const struct net_device_ops ucan_netdev_ops = { .ndo_open = ucan_open, .ndo_stop = ucan_close, .ndo_start_xmit = ucan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ucan_ethtool_ops = { diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 8a5596ce4e46..7449328f7cd7 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -868,7 +868,6 @@ static const struct net_device_ops usb_8dev_netdev_ops = { .ndo_open = usb_8dev_open, .ndo_stop = usb_8dev_close, .ndo_start_xmit = usb_8dev_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops usb_8dev_ethtool_ops = { diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index a25a3ca62c12..43d7f22820b8 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1702,7 +1702,6 @@ static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, - .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops xcan_ethtool_ops = { diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 4d9af691b989..7eb301fd987d 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -154,4 +154,11 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM This enables support for the Vitesse VSC7385, VSC7388, VSC7395 and VSC7398 SparX integrated ethernet switches, connected over a CPU-attached address bus and work in memory-mapped I/O mode. + +config NET_DSA_YT921X + tristate "Motorcomm YT9215 ethernet switch chip support" + select NET_DSA_TAG_YT921X + help + This enables support for the Motorcomm YT9215 ethernet switch + chip. 
endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 0f8ff4a1a313..16de4ba3fa38 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o +obj-$(CONFIG_NET_DSA_YT921X) += yt921x.o obj-y += b53/ obj-y += hirschmann/ obj-y += lantiq/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index eb767edc4c13..72c85cd34a4e 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -632,6 +632,25 @@ static void b53_port_set_learning(struct b53_device *dev, int port, b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); } +static void b53_port_set_isolated(struct b53_device *dev, int port, + bool isolated) +{ + u8 offset; + u16 reg; + + if (is5325(dev)) + offset = B53_PROTECTED_PORT_SEL_25; + else + offset = B53_PROTECTED_PORT_SEL; + + b53_read16(dev, B53_CTRL_PAGE, offset, &reg); + if (isolated) + reg |= BIT(port); + else + reg &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, offset, reg); +} + static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) { struct b53_device *dev = ds->priv; @@ -652,6 +671,7 @@ int b53_setup_port(struct dsa_switch *ds, int port) b53_port_set_ucast_flood(dev, port, true); b53_port_set_mcast_flood(dev, port, true); b53_port_set_learning(dev, port, false); + b53_port_set_isolated(dev, port, false); /* Force all traffic to go to the CPU port to prevent the ASIC from * trying to forward to bridged ports on matching FDB entries, then @@ -1830,49 +1850,78 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) return b53_arl_op_wait(dev); } -static int b53_arl_read(struct b53_device *dev, u64 mac, - u16 vid, struct b53_arl_entry *ent, u8 *idx) +static void b53_arl_read_entry_25(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) { - DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); - unsigned int i; - int ret; + u64 mac_vid; - ret = b53_arl_op_wait(dev); - if (ret) - return ret; + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_arl_to_entry_25(ent, mac_vid); +} - bitmap_zero(free_bins, dev->num_arl_bins); +static void b53_arl_write_entry_25(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u64 mac_vid; - /* Read the bins */ - for (i = 0; i < dev->num_arl_bins; i++) { - u64 mac_vid; - u32 fwd_entry; + b53_arl_from_entry_25(&mac_vid, ent); + b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + mac_vid); +} - b53_read64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); - b53_read32(dev, B53_ARLIO_PAGE, - B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); - b53_arl_to_entry(ent, mac_vid, fwd_entry); +static void b53_arl_read_entry_89(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + u64 mac_vid; + u16 fwd_entry; - if (!(fwd_entry & ARLTBL_VALID)) { - set_bit(i, free_bins); - continue; - } - if ((mac_vid & ARLTBL_MAC_MASK) != mac) - continue; - if (dev->vlan_enabled && - ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) - continue; - *idx = i; - return 0; - } + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); + b53_arl_to_entry_89(ent, mac_vid, fwd_entry); +} - *idx =
find_first_bit(free_bins, dev->num_arl_bins); - return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; +static void b53_arl_write_entry_89(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent); + b53_write64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); + b53_write16(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); } -static int b53_arl_read_25(struct b53_device *dev, u64 mac, - u16 vid, struct b53_arl_entry *ent, u8 *idx) +static void b53_arl_read_entry_95(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); +} + +static void b53_arl_write_entry_95(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_arl_from_entry(&mac_vid, &fwd_entry, ent); + b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + mac_vid); + b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), + fwd_entry); +} + +static int b53_arl_read(struct b53_device *dev, const u8 *mac, + u16 vid, struct b53_arl_entry *ent, u8 *idx) { DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); unsigned int i; @@ -1886,21 +1935,15 @@ static int b53_arl_read_25(struct b53_device *dev, u64 mac, /* Read the bins */ for (i = 0; i < dev->num_arl_bins; i++) { - u64 mac_vid; - - b53_read64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); + b53_arl_read_entry(dev, ent, i); - b53_arl_to_entry_25(ent, mac_vid); - - if (!(mac_vid & ARLTBL_VALID_25)) { + if (!ent->is_valid) { set_bit(i, free_bins); continue; } - if ((mac_vid & ARLTBL_MAC_MASK) != mac) + if (!ether_addr_equal(ent->mac, mac)) continue; - if (dev->vlan_enabled && - ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid) + if (dev->vlan_enabled && ent->vid != vid) continue; *idx = i; return 0; @@ -1914,9 +1957,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, const unsigned char *addr, u16 vid, bool is_valid) { struct b53_arl_entry ent; - u32 fwd_entry; - u64 mac, mac_vid = 0; u8 idx = 0; + u64 mac; int ret; /* Convert the array into a 64-bit MAC */ @@ -1932,10 +1974,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, if (ret) return ret; - if (is5325(dev) || is5365(dev)) - ret = b53_arl_read_25(dev, mac, vid, &ent, &idx); - else - ret = b53_arl_read(dev, mac, vid, &ent, &idx); + ret = b53_arl_read(dev, addr, vid, &ent, &idx); /* If this is a read, just finish now */ if (op) @@ -1952,7 +1991,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, /* We could not find a matching MAC, so reset to a new entry */ dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", addr, vid, idx); - fwd_entry = 0; break; default: dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", @@ -1979,17 +2017,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, ent.is_static = true; ent.is_age = false; memcpy(ent.mac, addr, ETH_ALEN); - if (is5325(dev) || is5365(dev)) - b53_arl_from_entry_25(&mac_vid, &ent); - else - b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); - - b53_write64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); - - if (!is5325(dev) && !is5365(dev)) - b53_write32(dev, B53_ARLIO_PAGE, - B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); + b53_arl_write_entry(dev, 
&ent, idx); return b53_arl_rw_op(dev, 0); } @@ -2024,18 +2052,53 @@ int b53_fdb_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_fdb_del); -static int b53_arl_search_wait(struct b53_device *dev) +static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val) { - unsigned int timeout = 1000; - u8 reg, offset; + u8 offset; + + if (is5325(dev) || is5365(dev)) + offset = B53_ARL_SRCH_CTL_25; + else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) || + is63xx(dev)) + offset = B53_ARL_SRCH_CTL_89; + else + offset = B53_ARL_SRCH_CTL; + + if (is63xx(dev)) { + u16 val16; + + b53_read16(dev, B53_ARLIO_PAGE, offset, &val16); + *val = val16 & 0xff; + } else { + b53_read8(dev, B53_ARLIO_PAGE, offset, val); + } +} + +static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val) +{ + u8 offset; if (is5325(dev) || is5365(dev)) offset = B53_ARL_SRCH_CTL_25; + else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) || + is63xx(dev)) + offset = B53_ARL_SRCH_CTL_89; else offset = B53_ARL_SRCH_CTL; + if (is63xx(dev)) + b53_write16(dev, B53_ARLIO_PAGE, offset, val); + else + b53_write8(dev, B53_ARLIO_PAGE, offset, val); +} + +static int b53_arl_search_wait(struct b53_device *dev) +{ + unsigned int timeout = 1000; + u8 reg; + do { - b53_read8(dev, B53_ARLIO_PAGE, offset, &reg); + b53_read_arl_srch_ctl(dev, &reg); if (!(reg & ARL_SRCH_STDN)) return -ENOENT; @@ -2048,28 +2111,61 @@ static int b53_arl_search_wait(struct b53_device *dev) return -ETIMEDOUT; } -static void b53_arl_search_rd(struct b53_device *dev, u8 idx, - struct b53_arl_entry *ent) +static void b53_arl_search_read_25(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) { u64 mac_vid; - if (is5325(dev)) { - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); - } else if (is5365(dev)) { - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); - } else { - u32 fwd_entry; + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, + &mac_vid); + b53_arl_to_entry_25(ent, mac_vid); +} - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), - &mac_vid); - b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), - &fwd_entry); - b53_arl_to_entry(ent, mac_vid, fwd_entry); - } +static void b53_arl_search_read_65(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, + &mac_vid); + b53_arl_to_entry_25(ent, mac_vid); +} + +static void b53_arl_search_read_89(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u16 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89, + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry); + b53_arl_to_entry_89(ent, mac_vid, fwd_entry); +} + +static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u16 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX, + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry); + b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry); +} + +static void b53_arl_search_read_95(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), + &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), + &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry);
} static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, @@ -2090,36 +2186,28 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, unsigned int count = 0, results_per_hit = 1; struct b53_device *priv = ds->priv; struct b53_arl_entry results[2]; - u8 offset; int ret; - u8 reg; if (priv->num_arl_bins > 2) results_per_hit = 2; mutex_lock(&priv->arl_mutex); - if (is5325(priv) || is5365(priv)) - offset = B53_ARL_SRCH_CTL_25; - else - offset = B53_ARL_SRCH_CTL; - /* Start search operation */ - reg = ARL_SRCH_STDN; - b53_write8(priv, B53_ARLIO_PAGE, offset, reg); + b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN); do { ret = b53_arl_search_wait(priv); if (ret) break; - b53_arl_search_rd(priv, 0, &results[0]); + b53_arl_search_read(priv, 0, &results[0]); ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) break; if (results_per_hit == 2) { - b53_arl_search_rd(priv, 1, &results[1]); + b53_arl_search_read(priv, 1, &results[1]); ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) break; @@ -2340,7 +2428,7 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct b53_device *dev = ds->priv; - unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); + unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED); if (!is5325(dev)) mask |= BR_LEARNING; @@ -2365,6 +2453,9 @@ int b53_br_flags(struct dsa_switch *ds, int port, if (flags.mask & BR_LEARNING) b53_port_set_learning(ds->priv, port, !!(flags.val & BR_LEARNING)); + if (flags.mask & BR_ISOLATED) + b53_port_set_isolated(ds->priv, port, + !!(flags.val & BR_ISOLATED)); return 0; } @@ -2645,6 +2736,36 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_change_mtu = b53_change_mtu, }; +static const struct b53_arl_ops b53_arl_ops_25 = { + .arl_read_entry = b53_arl_read_entry_25, + .arl_write_entry = b53_arl_write_entry_25, + .arl_search_read = b53_arl_search_read_25, +}; + +static const struct b53_arl_ops b53_arl_ops_65 = { + .arl_read_entry = b53_arl_read_entry_25, + .arl_write_entry = b53_arl_write_entry_25, + .arl_search_read = b53_arl_search_read_65, +}; + +static const struct b53_arl_ops b53_arl_ops_89 = { + .arl_read_entry = b53_arl_read_entry_89, + .arl_write_entry = b53_arl_write_entry_89, + .arl_search_read = b53_arl_search_read_89, +}; + +static const struct b53_arl_ops b53_arl_ops_63xx = { + .arl_read_entry = b53_arl_read_entry_89, + .arl_write_entry = b53_arl_write_entry_89, + .arl_search_read = b53_arl_search_read_63xx, +}; + +static const struct b53_arl_ops b53_arl_ops_95 = { + .arl_read_entry = b53_arl_read_entry_95, + .arl_write_entry = b53_arl_write_entry_95, + .arl_search_read = b53_arl_search_read_95, +}; + struct b53_chip_data { u32 chip_id; const char *dev_name; @@ -2658,6 +2779,7 @@ struct b53_chip_data { u8 duplex_reg; u8 jumbo_pm_reg; u8 jumbo_size_reg; + const struct b53_arl_ops *arl_ops; }; #define B53_VTA_REGS \ @@ -2677,6 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, + .arl_ops = &b53_arl_ops_25, }, { .chip_id = BCM5365_DEVICE_ID, @@ -2687,6 +2810,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, + .arl_ops = &b53_arl_ops_65, }, { .chip_id = BCM5389_DEVICE_ID, @@ -2700,6 +2824,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = 
BCM5395_DEVICE_ID, @@ -2713,6 +2838,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM5397_DEVICE_ID, @@ -2726,6 +2852,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = BCM5398_DEVICE_ID, @@ -2739,6 +2866,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = BCM53101_DEVICE_ID, @@ -2752,6 +2880,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53115_DEVICE_ID, @@ -2765,6 +2894,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53125_DEVICE_ID, @@ -2778,6 +2908,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53128_DEVICE_ID, @@ -2791,19 +2922,21 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM63XX_DEVICE_ID, .dev_name = "BCM63xx", .vlans = 4096, .enabled_ports = 0, /* pdata must provide them */ - .arl_bins = 4, - .arl_buckets = 1024, + .arl_bins = 1, + .arl_buckets = 4096, .imp_port = 8, .vta_regs = B53_VTA_REGS_63XX, .duplex_reg = B53_DUPLEX_STAT_63XX, .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, + .arl_ops = &b53_arl_ops_63xx, }, { .chip_id = BCM53010_DEVICE_ID, @@ -2817,6 +2950,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53011_DEVICE_ID, @@ -2830,6 +2964,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53012_DEVICE_ID, @@ -2843,6 +2978,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53018_DEVICE_ID, @@ -2856,6 +2992,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53019_DEVICE_ID, @@ -2869,6 +3006,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM58XX_DEVICE_ID, @@ -2882,6 +3020,7 @@ static const struct b53_chip_data b53_switch_chips[] = { 
.duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM583XX_DEVICE_ID, @@ -2895,6 +3034,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, /* Starfighter 2 */ { @@ -2909,6 +3049,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM7445_DEVICE_ID, @@ -2922,6 +3063,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM7278_DEVICE_ID, @@ -2935,6 +3077,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53134_DEVICE_ID, @@ -2949,6 +3092,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, }; @@ -2977,6 +3121,7 @@ static int b53_switch_init(struct b53_device *dev) dev->num_vlans = chip->vlans; dev->num_arl_bins = chip->arl_bins; dev->num_arl_buckets = chip->arl_buckets; + dev->arl_ops = chip->arl_ops; break; } } diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 458775f95164..2bfd0e7c95c9 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -58,6 +58,17 @@ struct b53_io_ops { bool link_up); }; +struct b53_arl_entry; + +struct b53_arl_ops { + void (*arl_read_entry)(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx); + void (*arl_write_entry)(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx); + void (*arl_search_read)(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent); +}; + #define B53_INVALID_LANE 0xff enum { @@ -127,6 +138,7 @@ struct b53_device { struct mutex stats_mutex; struct mutex arl_mutex; const struct b53_io_ops *ops; + const struct b53_arl_ops *arl_ops; /* chip specific data */ u32 chip_id; @@ -341,6 +353,18 @@ static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent, ent->vid = mac_vid >> ARLTBL_VID_S_65; } +static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent, + u64 mac_vid, u16 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK_89; + ent->is_valid = !!(fwd_entry & ARLTBL_VALID_89); + ent->is_age = !!(fwd_entry & ARLTBL_AGE_89); + ent->is_static = !!(fwd_entry & ARLTBL_STATIC_89); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; +} + static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, const struct b53_arl_entry *ent) { @@ -371,6 +395,53 @@ static inline void b53_arl_from_entry_25(u64 *mac_vid, *mac_vid |= ARLTBL_AGE_25; } +static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, + const struct b53_arl_entry *ent) +{ + *mac_vid = ether_addr_to_u64(ent->mac); + *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; + *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK_89; + if (ent->is_valid) + *fwd_entry |= ARLTBL_VALID_89; + if (ent->is_static) + *fwd_entry |= 
ARLTBL_STATIC_89; + if (ent->is_age) + *fwd_entry |= ARLTBL_AGE_89; +} + +static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent, + u64 mac_vid, u16 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; + + ent->port = fwd_entry & ARL_SRST_PORT_ID_MASK_63XX; + ent->port >>= 1; + + ent->is_age = !!(fwd_entry & ARL_SRST_AGE_63XX); + ent->is_static = !!(fwd_entry & ARL_SRST_STATIC_63XX); + ent->is_valid = 1; +} + +static inline void b53_arl_read_entry(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + dev->arl_ops->arl_read_entry(dev, ent, idx); +} + +static inline void b53_arl_write_entry(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + dev->arl_ops->arl_write_entry(dev, ent, idx); +} + +static inline void b53_arl_search_read(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + dev->arl_ops->arl_search_read(dev, idx, ent); +} + #ifdef CONFIG_BCM47XX #include <linux/bcm47xx_nvram.h> diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8ce1ce72e938..69ebbec932f6 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -119,6 +119,10 @@ #define B53_SWITCH_CTRL 0x22 #define B53_MII_DUMB_FWDG_EN BIT(6) +/* Protected Port Selection (16 bit) */ +#define B53_PROTECTED_PORT_SEL 0x24 +#define B53_PROTECTED_PORT_SEL_25 0x26 + /* (16 bit) */ #define B53_UC_FLOOD_MASK 0x32 #define B53_MC_FLOOD_MASK 0x34 @@ -342,12 +346,20 @@ #define ARLTBL_STATIC BIT(15) #define ARLTBL_VALID BIT(16) +/* BCM5389 ARL Table Data Entry N Register format (16 bit) */ +#define ARLTBL_DATA_PORT_ID_MASK_89 GENMASK(8, 0) +#define ARLTBL_TC_MASK_89 GENMASK(12, 10) +#define ARLTBL_AGE_89 BIT(13) +#define ARLTBL_STATIC_89 BIT(14) +#define ARLTBL_VALID_89 BIT(15) + /* Maximum number of bin entries in the ARL for all switches */ #define B53_ARLTBL_MAX_BIN_ENTRIES 4 /* ARL Search Control Register (8 bit) */ #define B53_ARL_SRCH_CTL 0x50 #define B53_ARL_SRCH_CTL_25 0x20 +#define B53_ARL_SRCH_CTL_89 0x30 #define ARL_SRCH_VLID BIT(0) #define ARL_SRCH_STDN BIT(7) @@ -355,10 +367,14 @@ #define B53_ARL_SRCH_ADDR 0x51 #define B53_ARL_SRCH_ADDR_25 0x22 #define B53_ARL_SRCH_ADDR_65 0x24 +#define B53_ARL_SRCH_ADDR_89 0x31 +#define B53_ARL_SRCH_ADDR_63XX 0x32 #define ARL_ADDR_MASK GENMASK(14, 0) /* ARL Search MAC/VID Result (64 bit) */ #define B53_ARL_SRCH_RSTL_0_MACVID 0x60 +#define B53_ARL_SRCH_RSLT_MACVID_89 0x33 +#define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34 /* Single register search result on 5325 */ #define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 @@ -368,9 +384,19 @@ /* ARL Search Data Result (32 bit) */ #define B53_ARL_SRCH_RSTL_0 0x68 +/* BCM5389 ARL Search Data Result (16 bit) */ +#define B53_ARL_SRCH_RSLT_89 0x3b + #define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10)) #define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10)) +/* 63XX ARL Search Data Result (16 bit) */ +#define B53_ARL_SRCH_RSLT_63XX 0x3c +#define ARL_SRST_PORT_ID_MASK_63XX GENMASK(9, 1) +#define ARL_SRST_TC_MASK_63XX GENMASK(13, 11) +#define ARL_SRST_AGE_63XX BIT(14) +#define ARL_SRST_STATIC_63XX BIT(15) + /************************************************************************* * IEEE 802.1X Registers *************************************************************************/ diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 650d93226d9f..4a416f2717ba 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ 
-441,11 +441,6 @@ out: static int __init dsa_loop_init(void) { - struct fixed_phy_status status = { - .link = 1, - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - }; unsigned int i; int ret; @@ -454,7 +449,7 @@ static int __init dsa_loop_init(void) return ret; for (i = 0; i < NUM_FIXED_PHYS; i++) - phydevs[i] = fixed_phy_register(&status, NULL); + phydevs[i] = fixed_phy_register_100fd(); ret = mdio_driver_register(&dsa_loop_drv); if (ret) { diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig index 1cb053c823f7..4a9771be5d58 100644 --- a/drivers/net/dsa/lantiq/Kconfig +++ b/drivers/net/dsa/lantiq/Kconfig @@ -1,7 +1,24 @@ +config NET_DSA_LANTIQ_COMMON + tristate + select REGMAP + config NET_DSA_LANTIQ_GSWIP tristate "Lantiq / Intel GSWIP" depends on HAS_IOMEM select NET_DSA_TAG_GSWIP + select NET_DSA_LANTIQ_COMMON help This enables support for the Lantiq / Intel GSWIP 2.1 found in the xrx200 / VR9 SoC. + +config NET_DSA_MXL_GSW1XX + tristate "MaxLinear GSW1xx Ethernet switch support" + select NET_DSA_TAG_MXL_GSW1XX + select NET_DSA_LANTIQ_COMMON + help + This enables support for the MaxLinear GSW1xx family of 1GE switches + GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X + GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature + GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X + GSW141 6 port, 4 PHYs, RGMII & SGMII + GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile index 849f85ebebd6..85fce605310b 100644 --- a/drivers/net/dsa/lantiq/Makefile +++ b/drivers/net/dsa/lantiq/Makefile @@ -1 +1,3 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o +obj-$(CONFIG_NET_DSA_LANTIQ_COMMON) += lantiq_gswip_common.o +obj-$(CONFIG_NET_DSA_MXL_GSW1XX) += mxl-gsw1xx.o diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c index 2169c0814a48..57dd063c0740 100644 --- a/drivers/net/dsa/lantiq/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq/lantiq_gswip.c @@ -2,1282 +2,33 @@ /* * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs * - * Copyright (C) 2010 Lantiq Deutschland - * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> - * - * The VLAN and bridge model the GSWIP hardware uses does not directly - * matches the model DSA uses. - * - * The hardware has 64 possible table entries for bridges with one VLAN - * ID, one flow id and a list of ports for each bridge. All entries which - * match the same flow ID are combined in the mac learning table, they - * act as one global bridge. - * The hardware does not support VLAN filter on the port, but on the - * bridge, this driver converts the DSA model to the hardware. - * - * The CPU gets all the exception frames which do not match any forwarding - * rule and the CPU port is also added to all bridges. This makes it possible - * to handle all the special cases easily in software. - * At the initialization the driver allocates one bridge table entry for - * each switch port which is used when the port is used without an - * explicit bridge. This prevents the frames from being forwarded - * between all LAN ports by default. 
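/* Editor's illustration, not part of the patch: under the model described in
 * the comment being removed above, a switch with LAN ports 0..3 comes up with
 *
 *   active VLAN table            VLAN mapping table
 *   idx 1: vid 0, fid 1          idx 1: ports = BIT(0) | cpu_ports
 *   idx 2: vid 0, fid 2          idx 2: ports = BIT(1) | cpu_ports
 *   idx 3: vid 0, fid 3          idx 3: ports = BIT(2) | cpu_ports
 *   idx 4: vid 0, fid 4          idx 4: ports = BIT(3) | cpu_ports
 *
 * (see gswip_add_single_port_br() further down): each LAN port sits in its
 * own single-port bridge with the CPU, so no LAN-to-LAN forwarding happens
 * until a port joins a real bridge.
 */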
+ * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland */ #include "lantiq_gswip.h" #include "lantiq_pce.h" +#include <linux/clk.h> #include <linux/delay.h> -#include <linux/etherdevice.h> #include <linux/firmware.h> -#include <linux/if_bridge.h> -#include <linux/if_vlan.h> -#include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_mdio.h> -#include <linux/of_net.h> #include <linux/of_platform.h> -#include <linux/phy.h> -#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> #include <dt-bindings/mips/lantiq_rcu_gphy.h> +#include <net/dsa.h> + struct xway_gphy_match_data { char *fe_firmware_name; char *ge_firmware_name; }; -struct gswip_pce_table_entry { - u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index - u16 table; // PCE_TBL_CTRL.ADDR = pData->table - u16 key[8]; - u16 val[5]; - u16 mask; - u8 gmap; - bool type; - bool valid; - bool key_mode; -}; - -struct gswip_rmon_cnt_desc { - unsigned int size; - unsigned int offset; - const char *name; -}; - -#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} - -static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { - /** Receive Packet Count (only packets that are accepted and not discarded). */ - MIB_DESC(1, 0x1F, "RxGoodPkts"), - MIB_DESC(1, 0x23, "RxUnicastPkts"), - MIB_DESC(1, 0x22, "RxMulticastPkts"), - MIB_DESC(1, 0x21, "RxFCSErrorPkts"), - MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), - MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), - MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), - MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), - MIB_DESC(1, 0x20, "RxGoodPausePkts"), - MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), - MIB_DESC(1, 0x12, "Rx64BytePkts"), - MIB_DESC(1, 0x13, "Rx127BytePkts"), - MIB_DESC(1, 0x14, "Rx255BytePkts"), - MIB_DESC(1, 0x15, "Rx511BytePkts"), - MIB_DESC(1, 0x16, "Rx1023BytePkts"), - /** Receive Size 1024-1522 (or more, if configured) Packet Count. */ - MIB_DESC(1, 0x17, "RxMaxBytePkts"), - MIB_DESC(1, 0x18, "RxDroppedPkts"), - MIB_DESC(1, 0x19, "RxFilteredPkts"), - MIB_DESC(2, 0x24, "RxGoodBytes"), - MIB_DESC(2, 0x26, "RxBadBytes"), - MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), - MIB_DESC(1, 0x0C, "TxGoodPkts"), - MIB_DESC(1, 0x06, "TxUnicastPkts"), - MIB_DESC(1, 0x07, "TxMulticastPkts"), - MIB_DESC(1, 0x00, "Tx64BytePkts"), - MIB_DESC(1, 0x01, "Tx127BytePkts"), - MIB_DESC(1, 0x02, "Tx255BytePkts"), - MIB_DESC(1, 0x03, "Tx511BytePkts"), - MIB_DESC(1, 0x04, "Tx1023BytePkts"), - /** Transmit Size 1024-1522 (or more, if configured) Packet Count. 
*/ - MIB_DESC(1, 0x05, "TxMaxBytePkts"), - MIB_DESC(1, 0x08, "TxSingleCollCount"), - MIB_DESC(1, 0x09, "TxMultCollCount"), - MIB_DESC(1, 0x0A, "TxLateCollCount"), - MIB_DESC(1, 0x0B, "TxExcessCollCount"), - MIB_DESC(1, 0x0D, "TxPauseCount"), - MIB_DESC(1, 0x10, "TxDroppedPkts"), - MIB_DESC(2, 0x0E, "TxGoodBytes"), -}; - -static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->gswip + (offset * 4)); -} - -static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->gswip + (offset * 4)); -} - -static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_switch_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_switch_w(priv, val, offset); -} - -static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, - u32 cleared) -{ - u32 val; - - return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val, - (val & cleared) == 0, 20, 50000); -} - -static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->mdio + (offset * 4)); -} - -static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->mdio + (offset * 4)); -} - -static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_mdio_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_mdio_w(priv, val, offset); -} - -static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->mii + (offset * 4)); -} - -static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->mii + (offset * 4)); -} - -static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_mii_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_mii_w(priv, val, offset); -} - -static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, - int port) -{ - int reg_port; - - /* MII_CFG register only exists for MII ports */ - if (!(priv->hw_info->mii_ports & BIT(port))) - return; - - reg_port = port + priv->hw_info->mii_port_reg_offset; - - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(reg_port)); -} - -static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, - int port) -{ - int reg_port; - - /* MII_PCDU register only exists for MII ports */ - if (!(priv->hw_info->mii_ports & BIT(port))) - return; - - reg_port = port + priv->hw_info->mii_port_reg_offset; - - switch (reg_port) { - case 0: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0); - break; - case 1: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1); - break; - case 5: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5); - break; - } -} - -static int gswip_mdio_poll(struct gswip_priv *priv) -{ - int cnt = 100; - - while (likely(cnt--)) { - u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL); - - if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0) - return 0; - usleep_range(20, 40); - } - - return -ETIMEDOUT; -} - -static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) -{ - struct gswip_priv *priv = bus->priv; - int err; - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE); - gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | - ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | - (reg & GSWIP_MDIO_CTRL_REGAD_MASK), - GSWIP_MDIO_CTRL); - - return 0; -} 
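The accessor helpers removed here move into the new lantiq_gswip_common module, and NET_DSA_LANTIQ_COMMON selects REGMAP (see the Kconfig hunk above), which suggests the open-coded __raw_readl()/__raw_writel() pairs become regmap operations. A sketch of the equivalent idiom under that assumption (not the actual common-module code):

#include <linux/regmap.h>

/* The old helpers addressed registers as (offset * 4), i.e. a 32-bit
 * register file with a 4-byte stride. */
static const struct regmap_config gswip_switch_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/* Same semantics as the removed gswip_switch_mask(): clear the bits in
 * `clear`, then set the bits in `set`, in one read-modify-write. */
static int gswip_switch_mask_regmap(struct regmap *map, u32 clear, u32 set,
				    u32 offset)
{
	return regmap_update_bits(map, offset * 4, clear | set, set);
}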
- -static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) -{ - struct gswip_priv *priv = bus->priv; - int err; - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | - ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | - (reg & GSWIP_MDIO_CTRL_REGAD_MASK), - GSWIP_MDIO_CTRL); - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - return gswip_mdio_r(priv, GSWIP_MDIO_READ); -} - -static int gswip_mdio(struct gswip_priv *priv) -{ - struct device_node *mdio_np, *switch_np = priv->dev->of_node; - struct device *dev = priv->dev; - struct mii_bus *bus; - int err = 0; - - mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio"); - if (!mdio_np) - mdio_np = of_get_child_by_name(switch_np, "mdio"); - - if (!of_device_is_available(mdio_np)) - goto out_put_node; - - bus = devm_mdiobus_alloc(dev); - if (!bus) { - err = -ENOMEM; - goto out_put_node; - } - - bus->priv = priv; - bus->read = gswip_mdio_rd; - bus->write = gswip_mdio_wr; - bus->name = "lantiq,xrx200-mdio"; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); - bus->parent = priv->dev; - - err = devm_of_mdiobus_register(dev, bus, mdio_np); - -out_put_node: - of_node_put(mdio_np); - - return err; -} - -static int gswip_pce_table_entry_read(struct gswip_priv *priv, - struct gswip_pce_table_entry *tbl) -{ - int i; - int err; - u16 crtl; - u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : - GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; - - mutex_lock(&priv->pce_table_lock); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS, - GSWIP_PCE_TBL_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - for (i = 0; i < ARRAY_SIZE(tbl->key); i++) - tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); - - for (i = 0; i < ARRAY_SIZE(tbl->val); i++) - tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i)); - - tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK); - - crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); - - tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE); - tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); - tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; - - mutex_unlock(&priv->pce_table_lock); - - return 0; -} - -static int gswip_pce_table_entry_write(struct gswip_priv *priv, - struct gswip_pce_table_entry *tbl) -{ - int i; - int err; - u16 crtl; - u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : - GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; - - mutex_lock(&priv->pce_table_lock); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode, - GSWIP_PCE_TBL_CTRL); - - for (i = 0; i < ARRAY_SIZE(tbl->key); i++) - gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i)); - - for (i = 0; i < ARRAY_SIZE(tbl->val); i++) - gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i)); - - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode, - GSWIP_PCE_TBL_CTRL); - - gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK); - - crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); - crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD | - GSWIP_PCE_TBL_CTRL_GMAP_MASK); - if (tbl->type) - crtl |= GSWIP_PCE_TBL_CTRL_TYPE; - if (tbl->valid) - crtl |= GSWIP_PCE_TBL_CTRL_VLD; - crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK; - crtl |= GSWIP_PCE_TBL_CTRL_BAS; - gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - - mutex_unlock(&priv->pce_table_lock); - - return err; -} - -/* Add the LAN port into a bridge with the CPU port by - * default. This prevents automatic forwarding of - * packages between the LAN ports when no explicit - * bridge is configured. - */ -static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - struct gswip_pce_table_entry vlan_mapping = {0,}; - int err; - - vlan_active.index = port + 1; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.key[0] = 0; /* vid */ - vlan_active.val[0] = port + 1 /* fid */; - vlan_active.valid = add; - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", err); - return err; - } - - if (!add) - return 0; - - vlan_mapping.index = port + 1; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - vlan_mapping.val[0] = 0 /* vid */; - vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds); - vlan_mapping.val[2] = 0; - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - return err; - } - - return 0; -} - -static int gswip_port_setup(struct dsa_switch *ds, int port) -{ - struct gswip_priv *priv = ds->priv; - int err; - - if (!dsa_is_cpu_port(ds, port)) { - err = gswip_add_single_port_br(priv, port, true); - if (err) - return err; - } - - return 0; -} - -static int gswip_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phydev) -{ - struct gswip_priv *priv = ds->priv; - - if (!dsa_is_cpu_port(ds, port)) { - u32 mdio_phy = 0; - - if (phydev) - mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); - } - - /* RMON Counter Enable for port */ - gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); - - /* enable port fetch/store dma & VLAN Modification */ - gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN | - GSWIP_FDMA_PCTRL_VLANMOD_BOTH, - GSWIP_FDMA_PCTRLp(port)); - gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, - GSWIP_SDMA_PCTRLp(port)); - - return 0; -} - -static void 
gswip_port_disable(struct dsa_switch *ds, int port) -{ - struct gswip_priv *priv = ds->priv; - - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, - GSWIP_FDMA_PCTRLp(port)); - gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, - GSWIP_SDMA_PCTRLp(port)); -} - -static int gswip_pce_load_microcode(struct gswip_priv *priv) -{ - int i; - int err; - - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL); - gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK); - - for (i = 0; i < priv->hw_info->pce_microcode_size; i++) { - gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_0, - GSWIP_PCE_TBL_VAL(0)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_1, - GSWIP_PCE_TBL_VAL(1)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_2, - GSWIP_PCE_TBL_VAL(2)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_3, - GSWIP_PCE_TBL_VAL(3)); - - /* start the table access: */ - gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS, - GSWIP_PCE_TBL_CTRL); - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) - return err; - } - - /* tell the switch that the microcode is loaded */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID, - GSWIP_PCE_GCTRL_0); - - return 0; -} - -static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - - /* Do not allow changing the VLAN filtering options while in bridge */ - if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) { - NL_SET_ERR_MSG_MOD(extack, - "Dynamic toggling of vlan_filtering not supported"); - return -EIO; - } - - if (vlan_filtering) { - /* Use tag based VLAN */ - gswip_switch_mask(priv, - GSWIP_PCE_VCTRL_VSR, - GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | - GSWIP_PCE_VCTRL_VEMR, - GSWIP_PCE_VCTRL(port)); - gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0, - GSWIP_PCE_PCTRL_0p(port)); - } else { - /* Use port based VLAN */ - gswip_switch_mask(priv, - GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | - GSWIP_PCE_VCTRL_VEMR, - GSWIP_PCE_VCTRL_VSR, - GSWIP_PCE_VCTRL(port)); - gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM, - GSWIP_PCE_PCTRL_0p(port)); - } - - return 0; -} - -static int gswip_setup(struct dsa_switch *ds) -{ - unsigned int cpu_ports = dsa_cpu_ports(ds); - struct gswip_priv *priv = ds->priv; - struct dsa_port *cpu_dp; - int err, i; - - gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES); - usleep_range(5000, 10000); - gswip_switch_w(priv, 0, GSWIP_SWRES); - - /* disable port fetch/store dma on all ports */ - for (i = 0; i < priv->hw_info->max_ports; i++) { - gswip_port_disable(ds, i); - gswip_port_vlan_filtering(ds, i, false, NULL); - } - - /* enable Switch */ - gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); - - err = gswip_pce_load_microcode(priv); - if (err) { - dev_err(priv->dev, "writing PCE microcode failed, %i\n", err); - return err; - } - - /* Default unknown Broadcast/Multicast/Unicast port maps */ - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP1); - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP2); - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP3); - - /* Deactivate MDIO PHY auto polling. 
Some PHYs as the AR8030 have an - * interoperability problem with this auto polling mechanism because - * their status registers think that the link is in a different state - * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set - * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the - * auto polling state machine consider the link being negotiated with - * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads - * to the switch port being completely dead (RX and TX are both not - * working). - * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F - * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes - * it would work fine for a few minutes to hours and then stop, on - * other device it would no traffic could be sent or received at all. - * Testing shows that when PHY auto polling is disabled these problems - * go away. - */ - gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); - - /* Configure the MDIO Clock 2.5 MHz */ - gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); - - /* bring up the mdio bus */ - err = gswip_mdio(priv); - if (err) { - dev_err(priv->dev, "mdio bus setup failed\n"); - return err; - } - - /* Disable the xMII interface and clear it's isolation bit */ - for (i = 0; i < priv->hw_info->max_ports; i++) - gswip_mii_mask_cfg(priv, - GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, - 0, i); - - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - /* enable special tag insertion on cpu port */ - gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, - GSWIP_FDMA_PCTRLp(cpu_dp->index)); - - /* accept special tag in ingress direction */ - gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, - GSWIP_PCE_PCTRL_0p(cpu_dp->index)); - } - - gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, - GSWIP_BM_QUEUE_GCTRL); - - /* VLAN aware Switching */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); - - /* Flush MAC Table */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0, - GSWIP_PCE_GCTRL_0_MTFL); - if (err) { - dev_err(priv->dev, "MAC flushing didn't finish\n"); - return err; - } - - ds->mtu_enforcement_ingress = true; - - ds->configure_vlan_while_not_filtering = false; - - return 0; -} - -static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, - int port, - enum dsa_tag_protocol mp) -{ - struct gswip_priv *priv = ds->priv; - - return priv->hw_info->tag_protocol; -} - -static int gswip_vlan_active_create(struct gswip_priv *priv, - struct net_device *bridge, - int fid, u16 vid) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int idx = -1; - int err; - int i; - - /* Look for a free slot */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (!priv->vlans[i].bridge) { - idx = i; - break; - } - } - - if (idx == -1) - return -ENOSPC; - - if (fid == -1) - fid = idx; - - vlan_active.index = idx; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.key[0] = vid; - vlan_active.val[0] = fid; - vlan_active.valid = true; - - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", err); - return err; - } - - priv->vlans[idx].bridge = bridge; - priv->vlans[idx].vid = vid; - priv->vlans[idx].fid = fid; - - return idx; -} - -static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - int err; - - 
vlan_active.index = idx; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.valid = false; - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) - dev_err(priv->dev, "failed to delete active VLAN: %d\n", err); - priv->vlans[idx].bridge = NULL; - - return err; -} - -static int gswip_vlan_add_unaware(struct gswip_priv *priv, - struct net_device *bridge, int port) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - bool active_vlan_created = false; - int idx = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - idx = i; - break; - } - } - - /* If this bridge is not programmed yet, add a Active VLAN table - * entry in a free slot and prepare the VLAN mapping table entry. - */ - if (idx == -1) { - idx = gswip_vlan_active_create(priv, bridge, -1, 0); - if (idx < 0) - return idx; - active_vlan_created = true; - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - /* VLAN ID byte, maps to the VLAN ID of vlan active table */ - vlan_mapping.val[0] = 0; - } else { - /* Read the existing VLAN mapping entry from the switch */ - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", - err); - return err; - } - } - - /* Update the VLAN mapping entry and write it to the switch */ - vlan_mapping.val[1] |= dsa_cpu_ports(priv->ds); - vlan_mapping.val[1] |= BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - /* In case an Active VLAN was creaetd delete it again */ - if (active_vlan_created) - gswip_vlan_active_remove(priv, idx); - return err; - } - - gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); - return 0; -} - -static int gswip_vlan_add_aware(struct gswip_priv *priv, - struct net_device *bridge, int port, - u16 vid, bool untagged, - bool pvid) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - unsigned int cpu_ports = dsa_cpu_ports(priv->ds); - bool active_vlan_created = false; - int idx = -1; - int fid = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - if (fid != -1 && fid != priv->vlans[i].fid) - dev_err(priv->dev, "one bridge with multiple flow ids\n"); - fid = priv->vlans[i].fid; - if (priv->vlans[i].vid == vid) { - idx = i; - break; - } - } - } - - /* If this bridge is not programmed yet, add a Active VLAN table - * entry in a free slot and prepare the VLAN mapping table entry. 
- */ - if (idx == -1) { - idx = gswip_vlan_active_create(priv, bridge, fid, vid); - if (idx < 0) - return idx; - active_vlan_created = true; - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - /* VLAN ID byte, maps to the VLAN ID of vlan active table */ - vlan_mapping.val[0] = vid; - } else { - /* Read the existing VLAN mapping entry from the switch */ - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", - err); - return err; - } - } - - vlan_mapping.val[0] = vid; - /* Update the VLAN mapping entry and write it to the switch */ - vlan_mapping.val[1] |= cpu_ports; - vlan_mapping.val[2] |= cpu_ports; - vlan_mapping.val[1] |= BIT(port); - if (untagged) - vlan_mapping.val[2] &= ~BIT(port); - else - vlan_mapping.val[2] |= BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - /* In case an Active VLAN was creaetd delete it again */ - if (active_vlan_created) - gswip_vlan_active_remove(priv, idx); - return err; - } - - if (pvid) - gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port)); - - return 0; -} - -static int gswip_vlan_remove(struct gswip_priv *priv, - struct net_device *bridge, int port, - u16 vid, bool pvid, bool vlan_aware) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int idx = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge && - (!vlan_aware || priv->vlans[i].vid == vid)) { - idx = i; - break; - } - } - - if (idx == -1) { - dev_err(priv->dev, "bridge to leave does not exists\n"); - return -ENOENT; - } - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err); - return err; - } - - vlan_mapping.val[1] &= ~BIT(port); - vlan_mapping.val[2] &= ~BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - return err; - } - - /* In case all ports are removed from the bridge, remove the VLAN */ - if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) { - err = gswip_vlan_active_remove(priv, idx); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", - err); - return err; - } - } - - /* GSWIP 2.2 (GRX300) and later program here the VID directly. */ - if (pvid) - gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); - - return 0; -} - -static int gswip_port_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, - bool *tx_fwd_offload, - struct netlink_ext_ack *extack) -{ - struct net_device *br = bridge.dev; - struct gswip_priv *priv = ds->priv; - int err; - - /* When the bridge uses VLAN filtering we have to configure VLAN - * specific bridges. No bridge is configured here. 
- */ - if (!br_vlan_enabled(br)) { - err = gswip_vlan_add_unaware(priv, br, port); - if (err) - return err; - priv->port_vlan_filter &= ~BIT(port); - } else { - priv->port_vlan_filter |= BIT(port); - } - return gswip_add_single_port_br(priv, port, false); -} - -static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, - struct dsa_bridge bridge) -{ - struct net_device *br = bridge.dev; - struct gswip_priv *priv = ds->priv; - - gswip_add_single_port_br(priv, port, true); - - /* When the bridge uses VLAN filtering we have to configure VLAN - * specific bridges. No bridge is configured here. - */ - if (!br_vlan_enabled(br)) - gswip_vlan_remove(priv, br, port, 0, true, false); -} - -static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - unsigned int max_ports = priv->hw_info->max_ports; - int pos = max_ports; - int i, idx = -1; - - /* We only support VLAN filtering on bridges */ - if (!dsa_is_cpu_port(ds, port) && !bridge) - return -EOPNOTSUPP; - - /* Check if there is already a page for this VLAN */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge && - priv->vlans[i].vid == vlan->vid) { - idx = i; - break; - } - } - - /* If this VLAN is not programmed yet, we have to reserve - * one entry in the VLAN table. Make sure we start at the - * next position round. - */ - if (idx == -1) { - /* Look for a free slot */ - for (; pos < ARRAY_SIZE(priv->vlans); pos++) { - if (!priv->vlans[pos].bridge) { - idx = pos; - pos++; - break; - } - } - - if (idx == -1) { - NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table"); - return -ENOSPC; - } - } - - return 0; -} - -static int gswip_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - int err; - - err = gswip_port_vlan_prepare(ds, port, vlan, extack); - if (err) - return err; - - /* We have to receive all packets on the CPU port and should not - * do any VLAN filtering here. This is also called with bridge - * NULL and then we do not know for which bridge to configure - * this. - */ - if (dsa_is_cpu_port(ds, port)) - return 0; - - return gswip_vlan_add_aware(priv, bridge, port, vlan->vid, - untagged, pvid); -} - -static int gswip_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - - /* We have to receive all packets on the CPU port and should not - * do any VLAN filtering here. This is also called with bridge - * NULL and then we do not know for which bridge to configure - * this. 
- */ - if (dsa_is_cpu_port(ds, port)) - return 0; - - return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true); -} - -static void gswip_port_fast_age(struct dsa_switch *ds, int port) -{ - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - int i; - int err; - - for (i = 0; i < 2048; i++) { - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.index = i; - - err = gswip_pce_table_entry_read(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, "failed to read mac bridge: %d\n", - err); - return; - } - - if (!mac_bridge.valid) - continue; - - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) - continue; - - if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, - mac_bridge.val[0])) - continue; - - mac_bridge.valid = false; - err = gswip_pce_table_entry_write(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, "failed to write mac bridge: %d\n", - err); - return; - } - } -} - -static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -{ - struct gswip_priv *priv = ds->priv; - u32 stp_state; - - switch (state) { - case BR_STATE_DISABLED: - gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, - GSWIP_SDMA_PCTRLp(port)); - return; - case BR_STATE_BLOCKING: - case BR_STATE_LISTENING: - stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN; - break; - case BR_STATE_LEARNING: - stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING; - break; - case BR_STATE_FORWARDING: - stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING; - break; - default: - dev_err(priv->dev, "invalid STP state: %d\n", state); - return; - } - - gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, - GSWIP_SDMA_PCTRLp(port)); - gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state, - GSWIP_PCE_PCTRL_0p(port)); -} - -static int gswip_port_fdb(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, bool add) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int fid = -1; - int i; - int err; - - /* Operation not supported on the CPU port, don't throw errors */ - if (!bridge) - return 0; - - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - fid = priv->vlans[i].fid; - break; - } - } - - if (fid == -1) { - dev_err(priv->dev, "no FID found for bridge %s\n", - bridge->name); - return -EINVAL; - } - - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.key_mode = true; - mac_bridge.key[0] = addr[5] | (addr[4] << 8); - mac_bridge.key[1] = addr[3] | (addr[2] << 8); - mac_bridge.key[2] = addr[1] | (addr[0] << 8); - mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid); - mac_bridge.val[0] = add ? 
BIT(port) : 0; /* port map */ - mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC; - mac_bridge.valid = add; - - err = gswip_pce_table_entry_write(priv, &mac_bridge); - if (err) - dev_err(priv->dev, "failed to write mac bridge: %d\n", err); - - return err; -} - -static int gswip_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - return gswip_port_fdb(ds, port, addr, vid, true); -} - -static int gswip_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - return gswip_port_fdb(ds, port, addr, vid, false); -} - -static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, - dsa_fdb_dump_cb_t *cb, void *data) -{ - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned char addr[ETH_ALEN]; - int i; - int err; - - for (i = 0; i < 2048; i++) { - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.index = i; - - err = gswip_pce_table_entry_read(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, - "failed to read mac bridge entry %d: %d\n", - i, err); - return err; - } - - if (!mac_bridge.valid) - continue; - - addr[5] = mac_bridge.key[0] & 0xff; - addr[4] = (mac_bridge.key[0] >> 8) & 0xff; - addr[3] = mac_bridge.key[1] & 0xff; - addr[2] = (mac_bridge.key[1] >> 8) & 0xff; - addr[1] = mac_bridge.key[2] & 0xff; - addr[0] = (mac_bridge.key[2] >> 8) & 0xff; - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) { - if (mac_bridge.val[0] & BIT(port)) { - err = cb(addr, 0, true, data); - if (err) - return err; - } - } else { - if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, - mac_bridge.val[0])) { - err = cb(addr, 0, false, data); - if (err) - return err; - } - } - } - return 0; -} - -static int gswip_port_max_mtu(struct dsa_switch *ds, int port) -{ - /* Includes 8 bytes for special header. */ - return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; -} - -static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) -{ - struct gswip_priv *priv = ds->priv; - - /* CPU port always has maximum mtu of user ports, so use it to set - * switch frame size, including 8 byte special header. - */ - if (dsa_is_cpu_port(ds, port)) { - new_mtu += 8; - gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, - GSWIP_MAC_FLEN); - } - - /* Enable MLEN for ports with non-standard MTUs, including the special - * header on the CPU port added above. 
- */ - if (new_mtu != ETH_DATA_LEN) - gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, - GSWIP_MAC_CTRL_2p(port)); - else - gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0, - GSWIP_MAC_CTRL_2p(port)); - - return 0; -} - static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { @@ -1346,327 +97,6 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port, MAC_10 | MAC_100 | MAC_1000; } -static void gswip_phylink_get_caps(struct dsa_switch *ds, int port, - struct phylink_config *config) -{ - struct gswip_priv *priv = ds->priv; - - priv->hw_info->phylink_get_caps(ds, port, config); -} - -static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) -{ - u32 mdio_phy; - - if (link) - mdio_phy = GSWIP_MDIO_PHY_LINK_UP; - else - mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); -} - -static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, - phy_interface_t interface) -{ - u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; - - switch (speed) { - case SPEED_10: - mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; - - if (interface == PHY_INTERFACE_MODE_RMII) - mii_cfg = GSWIP_MII_CFG_RATE_M50; - else - mii_cfg = GSWIP_MII_CFG_RATE_M2P5; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; - break; - - case SPEED_100: - mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; - - if (interface == PHY_INTERFACE_MODE_RMII) - mii_cfg = GSWIP_MII_CFG_RATE_M50; - else - mii_cfg = GSWIP_MII_CFG_RATE_M25; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; - break; - - case SPEED_1000: - mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; - - mii_cfg = GSWIP_MII_CFG_RATE_M125; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; - break; - } - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0, - GSWIP_MAC_CTRL_0p(port)); -} - -static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) -{ - u32 mac_ctrl_0, mdio_phy; - - if (duplex == DUPLEX_FULL) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; - mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; - } else { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; - mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; - } - - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0, - GSWIP_MAC_CTRL_0p(port)); - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); -} - -static void gswip_port_set_pause(struct gswip_priv *priv, int port, - bool tx_pause, bool rx_pause) -{ - u32 mac_ctrl_0, mdio_phy; - - if (tx_pause && rx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | - GSWIP_MDIO_PHY_FCONRX_EN; - } else if (tx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | - GSWIP_MDIO_PHY_FCONRX_DIS; - } else if (rx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | - GSWIP_MDIO_PHY_FCONRX_EN; - } else { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | - GSWIP_MDIO_PHY_FCONRX_DIS; - } - - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK, - mac_ctrl_0, GSWIP_MAC_CTRL_0p(port)); - gswip_mdio_mask(priv, - GSWIP_MDIO_PHY_FCONTX_MASK | - GSWIP_MDIO_PHY_FCONRX_MASK, - mdio_phy, GSWIP_MDIO_PHYp(port)); -} - -static void gswip_phylink_mac_config(struct phylink_config *config, - unsigned int mode, - const struct phylink_link_state *state) -{ - 
struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - u32 miicfg = 0; - - miicfg |= GSWIP_MII_CFG_LDCLKDIS; - - switch (state->interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - return; - case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_INTERNAL: - miicfg |= GSWIP_MII_CFG_MODE_MIIM; - break; - case PHY_INTERFACE_MODE_REVMII: - miicfg |= GSWIP_MII_CFG_MODE_MIIP; - break; - case PHY_INTERFACE_MODE_RMII: - miicfg |= GSWIP_MII_CFG_MODE_RMIIM; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - miicfg |= GSWIP_MII_CFG_MODE_RGMII; - break; - case PHY_INTERFACE_MODE_GMII: - miicfg |= GSWIP_MII_CFG_MODE_GMII; - break; - default: - dev_err(dp->ds->dev, - "Unsupported interface: %d\n", state->interface); - return; - } - - gswip_mii_mask_cfg(priv, - GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | - GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, - miicfg, port); - - switch (state->interface) { - case PHY_INTERFACE_MODE_RGMII_ID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK | - GSWIP_MII_PCDU_RXDLY_MASK, 0, port); - break; - case PHY_INTERFACE_MODE_RGMII_RXID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port); - break; - case PHY_INTERFACE_MODE_RGMII_TXID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port); - break; - default: - break; - } -} - -static void gswip_phylink_mac_link_down(struct phylink_config *config, - unsigned int mode, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); - - if (!dsa_port_is_cpu(dp)) - gswip_port_set_link(priv, port, false); -} - -static void gswip_phylink_mac_link_up(struct phylink_config *config, - struct phy_device *phydev, - unsigned int mode, - phy_interface_t interface, - int speed, int duplex, - bool tx_pause, bool rx_pause) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - - if (!dsa_port_is_cpu(dp)) { - gswip_port_set_link(priv, port, true); - gswip_port_set_speed(priv, port, speed, interface); - gswip_port_set_duplex(priv, port, duplex); - gswip_port_set_pause(priv, port, tx_pause, rx_pause); - } - - gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); -} - -static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, - uint8_t *data) -{ - int i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) - ethtool_puts(&data, gswip_rmon_cnt[i].name); -} - -static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, - u32 index) -{ - u32 result; - int err; - - gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR); - gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK | - GSWIP_BM_RAM_CTRL_OPMOD, - table | GSWIP_BM_RAM_CTRL_BAS, - GSWIP_BM_RAM_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, - GSWIP_BM_RAM_CTRL_BAS); - if (err) { - dev_err(priv->dev, "timeout while reading table: %u, index: %u\n", - table, index); - return 0; - } - - result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0)); - result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16; - - return result; -} - -static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data) -{ - struct gswip_priv 
*priv = ds->priv; - const struct gswip_rmon_cnt_desc *rmon_cnt; - int i; - u64 high; - - for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { - rmon_cnt = &gswip_rmon_cnt[i]; - - data[i] = gswip_bcm_ram_entry_read(priv, port, - rmon_cnt->offset); - if (rmon_cnt->size == 2) { - high = gswip_bcm_ram_entry_read(priv, port, - rmon_cnt->offset + 1); - data[i] |= high << 32; - } - } -} - -static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) -{ - if (sset != ETH_SS_STATS) - return 0; - - return ARRAY_SIZE(gswip_rmon_cnt); -} - -static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - - if (priv->hw_info->mac_select_pcs) - return priv->hw_info->mac_select_pcs(config, interface); - - return NULL; -} - -static const struct phylink_mac_ops gswip_phylink_mac_ops = { - .mac_config = gswip_phylink_mac_config, - .mac_link_down = gswip_phylink_mac_link_down, - .mac_link_up = gswip_phylink_mac_link_up, - .mac_select_pcs = gswip_phylink_mac_select_pcs, -}; - -static const struct dsa_switch_ops gswip_switch_ops = { - .get_tag_protocol = gswip_get_tag_protocol, - .setup = gswip_setup, - .port_setup = gswip_port_setup, - .port_enable = gswip_port_enable, - .port_disable = gswip_port_disable, - .port_bridge_join = gswip_port_bridge_join, - .port_bridge_leave = gswip_port_bridge_leave, - .port_fast_age = gswip_port_fast_age, - .port_vlan_filtering = gswip_port_vlan_filtering, - .port_vlan_add = gswip_port_vlan_add, - .port_vlan_del = gswip_port_vlan_del, - .port_stp_state_set = gswip_port_stp_state_set, - .port_fdb_add = gswip_port_fdb_add, - .port_fdb_del = gswip_port_fdb_del, - .port_fdb_dump = gswip_port_fdb_dump, - .port_change_mtu = gswip_port_change_mtu, - .port_max_mtu = gswip_port_max_mtu, - .phylink_get_caps = gswip_phylink_get_caps, - .get_strings = gswip_get_strings, - .get_ethtool_stats = gswip_get_ethtool_stats, - .get_sset_count = gswip_get_sset_count, -}; - static const struct xway_gphy_match_data xrx200a1x_gphy_data = { .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", @@ -1887,33 +317,37 @@ remove_gphy: return err; } -static int gswip_validate_cpu_port(struct dsa_switch *ds) -{ - struct gswip_priv *priv = ds->priv; - struct dsa_port *cpu_dp; - int cpu_port = -1; - - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - if (cpu_port != -1) - return dev_err_probe(ds->dev, -EINVAL, - "only a single CPU port is supported\n"); - - cpu_port = cpu_dp->index; - } - - if (cpu_port == -1) - return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n"); +static const struct regmap_config sw_regmap_config = { + .name = "switch", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_SDMA_PCTRLp(6), +}; - if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports) - return dev_err_probe(ds->dev, -EINVAL, - "unsupported CPU port defined\n"); +static const struct regmap_config mdio_regmap_config = { + .name = "mdio", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_MDIO_PHYp(0), +}; - return 0; -} +static const struct regmap_config mii_regmap_config = { + .name = "mii", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_MII_CFGp(6), +}; 
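+
+/* All three register files use .reg_shift = REGMAP_UPSHIFT(2). Assuming
+ * regmap's documented semantics (a negative .reg_shift upshifts the
+ * register number before the access), register n is accessed at byte
+ * offset n << 2, i.e. the registers sit at a 4-byte stride. A read
+ * through one of these regmaps is then simply:
+ *
+ *	u32 version;
+ *
+ *	regmap_read(priv->gswip, GSWIP_VERSION, &version);
+ *
+ * which performs a single MMIO read at gswip + (GSWIP_VERSION << 2) and
+ * replaces the driver's previous open-coded __iomem accessors.
+ */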
static int gswip_probe(struct platform_device *pdev) { struct device_node *np, *gphy_fw_np; + __iomem void *gswip, *mdio, *mii; struct device *dev = &pdev->dev; struct gswip_priv *priv; int err; @@ -1924,15 +358,27 @@ static int gswip_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->gswip = devm_platform_ioremap_resource(pdev, 0); + gswip = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(gswip)) + return PTR_ERR(gswip); + + mdio = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mdio)) + return PTR_ERR(mdio); + + mii = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(mii)) + return PTR_ERR(mii); + + priv->gswip = devm_regmap_init_mmio(dev, gswip, &sw_regmap_config); if (IS_ERR(priv->gswip)) return PTR_ERR(priv->gswip); - priv->mdio = devm_platform_ioremap_resource(pdev, 1); + priv->mdio = devm_regmap_init_mmio(dev, mdio, &mdio_regmap_config); if (IS_ERR(priv->mdio)) return PTR_ERR(priv->mdio); - priv->mii = devm_platform_ioremap_resource(pdev, 2); + priv->mii = devm_regmap_init_mmio(dev, mii, &mii_regmap_config); if (IS_ERR(priv->mii)) return PTR_ERR(priv->mii); @@ -1944,24 +390,9 @@ static int gswip_probe(struct platform_device *pdev) if (!priv->ds) return -ENOMEM; - priv->ds->dev = dev; - priv->ds->num_ports = priv->hw_info->max_ports; - priv->ds->priv = priv; - priv->ds->ops = &gswip_switch_ops; - priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops; priv->dev = dev; - mutex_init(&priv->pce_table_lock); - version = gswip_switch_r(priv, GSWIP_VERSION); - - /* The hardware has the 'major/minor' version bytes in the wrong order - * preventing numerical comparisons. Construct a 16-bit unsigned integer - * having the REV field as most significant byte and the MOD field as - * least significant byte. This is effectively swapping the two bytes of - * the version variable, but other than using swab16 it doesn't affect - * the source variable. 
- */ - priv->version = GSWIP_VERSION_REV(version) << 8 | - GSWIP_VERSION_MOD(version); + + regmap_read(priv->gswip, GSWIP_VERSION, &version); np = dev->of_node; switch (version) { @@ -1991,25 +422,14 @@ static int gswip_probe(struct platform_device *pdev) "gphy fw probe failed\n"); } - err = dsa_register_switch(priv->ds); - if (err) { - dev_err_probe(dev, err, "dsa switch registration failed\n"); - goto gphy_fw_remove; - } - - err = gswip_validate_cpu_port(priv->ds); + err = gswip_probe_common(priv, version); if (err) - goto disable_switch; + goto gphy_fw_remove; platform_set_drvdata(pdev, priv); - dev_info(dev, "probed GSWIP version %lx mod %lx\n", - GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version)); return 0; -disable_switch: - gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); - dsa_unregister_switch(priv->ds); gphy_fw_remove: for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); @@ -2025,7 +445,7 @@ static void gswip_remove(struct platform_device *pdev) return; /* disable the switch */ - gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); + gswip_disable_switch(priv); dsa_unregister_switch(priv->ds); diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h index 2df9c8e8cfd0..9c38e51a75e8 100644 --- a/drivers/net/dsa/lantiq/lantiq_gswip.h +++ b/drivers/net/dsa/lantiq/lantiq_gswip.h @@ -2,6 +2,7 @@ #ifndef __LANTIQ_GSWIP_H #define __LANTIQ_GSWIP_H +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/phylink.h> @@ -81,6 +82,10 @@ #define GSWIP_MII_PCDU5 0x05 #define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0) #define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7) +#define GSWIP_MII_PCDU_TXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_TXDLY_MASK) +#define GSWIP_MII_PCDU_RXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_RXDLY_MASK) +#define GSWIP_MII_PCDU_RXDLY_DEFAULT 2000 /* picoseconds */ +#define GSWIP_MII_PCDU_TXDLY_DEFAULT 2000 /* picoseconds */ /* GSWIP Core Registers */ #define GSWIP_SWRES 0x000 @@ -157,8 +162,15 @@ #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) +/* Ethernet Switch PCE Port Control Register 3 */ +#define GSWIP_PCE_PCTRL_3p(p) (0x483 + ((p) * 0xA)) +#define GSWIP_PCE_PCTRL_3_LNDIS BIT(15) /* Learning Disable */ #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA)) #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */ +#define GSWIP_PCE_VCTRL_VINR GENMASK(2, 1) /* VLAN Ingress Tag Rule */ +#define GSWIP_PCE_VCTRL_VINR_ALL 0 /* Admit tagged and untagged packets */ +#define GSWIP_PCE_VCTRL_VINR_TAGGED 1 /* Admit only tagged packets */ +#define GSWIP_PCE_VCTRL_VINR_UNTAGGED 2 /* Admit only untagged packets */ #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */ #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */ #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */ @@ -186,6 +198,12 @@ #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) #define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */ #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ +#define GSWIP_MAC_CTRL_4p(p) (0x907 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_4_LPIEN BIT(7) /* LPI Mode Enable */ +#define GSWIP_MAC_CTRL_4_GWAIT_MASK GENMASK(14, 8) /* LPI Wait Time 1G */ +#define GSWIP_MAC_CTRL_4_GWAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_GWAIT_MASK) +#define GSWIP_MAC_CTRL_4_WAIT_MASK 
GENMASK(6, 0)	/* LPI Wait Time 100M */
+#define  GSWIP_MAC_CTRL_4_WAIT(t)	u16_encode_bits((t), GSWIP_MAC_CTRL_4_WAIT_MASK)
 
 /* Ethernet Switch Fetch DMA Port Control Register */
 #define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
@@ -210,6 +228,7 @@
 #define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID	GENMASK(5, 0)	/* Filtering identifier */
 #define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT	GENMASK(7, 4)	/* Port on learned entries */
 #define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC	BIT(0)	/* Static, non-aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID	BIT(1)	/* Valid bit */
 
 #define XRX200_GPHY_FW_ALIGN	(16 * 1024)
 
@@ -222,6 +241,8 @@
  */
 #define GSWIP_MAX_PACKET_LENGTH	2400
 
+#define GSWIP_VLAN_UNAWARE_PVID	0
+
 struct gswip_pce_microcode {
 	u16 val_3;
 	u16 val_2;
@@ -234,6 +255,7 @@ struct gswip_hw_info {
 	unsigned int allowed_cpu_ports;
 	unsigned int mii_ports;
 	int mii_port_reg_offset;
+	bool supports_2500m;
 	const struct gswip_pce_microcode (*pce_microcode)[];
 	size_t pce_microcode_size;
 	enum dsa_tag_protocol tag_protocol;
@@ -257,9 +279,9 @@ struct gswip_vlan {
 };
 
 struct gswip_priv {
-	__iomem void *gswip;
-	__iomem void *mdio;
-	__iomem void *mii;
+	struct regmap *gswip;
+	struct regmap *mdio;
+	struct regmap *mii;
 	const struct gswip_hw_info *hw_info;
 	const struct xway_gphy_match_data *gphy_fw_name_cfg;
 	struct dsa_switch *ds;
@@ -268,9 +290,12 @@ struct gswip_priv {
 	struct gswip_vlan vlans[64];
 	int num_gphy_fw;
 	struct gswip_gphy_fw *gphy_fw;
-	u32 port_vlan_filter;
 	struct mutex pce_table_lock;
 	u16 version;
 };
 
+void gswip_disable_switch(struct gswip_priv *priv);
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version);
+
 #endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
new file mode 100644
index 000000000000..122ccea4057b
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel / MaxLinear GSWIP common function library
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible table entries for bridges with one VLAN
+ * ID, one flow id and a list of ports for each bridge. All entries which
+ * match the same flow ID are combined in the mac learning table; they
+ * act as one global bridge.
+ * The hardware does not support VLAN filtering on the port, only on the
+ * bridge; this driver converts the DSA model to the hardware model.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule and the CPU port is also added to all bridges. This makes it possible
+ * to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port which is used when the port is used without an
+ * explicit bridge. This prevents the frames from being forwarded
+ * between all LAN ports by default.
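+ *
+ * As a concrete example of this model (derived from the code in this
+ * file rather than from hardware documentation): two user ports bridged
+ * into br0 share one Active VLAN entry and therefore one flow id, so
+ * their learned MAC addresses end up in a common learning domain, while
+ * a second bridge br1 is given a different flow id and stays fully
+ * isolated from br0.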
+ */ + +#include "lantiq_gswip.h" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/regmap.h> +#include <net/dsa.h> + +struct gswip_pce_table_entry { + u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index + u16 table; // PCE_TBL_CTRL.ADDR = pData->table + u16 key[8]; + u16 val[5]; + u16 mask; + u8 gmap; + bool type; + bool valid; + bool key_mode; +}; + +struct gswip_rmon_cnt_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} + +static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { + /** Receive Packet Count (only packets that are accepted and not discarded). */ + MIB_DESC(1, 0x1F, "RxGoodPkts"), + MIB_DESC(1, 0x23, "RxUnicastPkts"), + MIB_DESC(1, 0x22, "RxMulticastPkts"), + MIB_DESC(1, 0x21, "RxFCSErrorPkts"), + MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), + MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), + MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), + MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), + MIB_DESC(1, 0x20, "RxGoodPausePkts"), + MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), + MIB_DESC(1, 0x12, "Rx64BytePkts"), + MIB_DESC(1, 0x13, "Rx127BytePkts"), + MIB_DESC(1, 0x14, "Rx255BytePkts"), + MIB_DESC(1, 0x15, "Rx511BytePkts"), + MIB_DESC(1, 0x16, "Rx1023BytePkts"), + /** Receive Size 1024-1522 (or more, if configured) Packet Count. */ + MIB_DESC(1, 0x17, "RxMaxBytePkts"), + MIB_DESC(1, 0x18, "RxDroppedPkts"), + MIB_DESC(1, 0x19, "RxFilteredPkts"), + MIB_DESC(2, 0x24, "RxGoodBytes"), + MIB_DESC(2, 0x26, "RxBadBytes"), + MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), + MIB_DESC(1, 0x0C, "TxGoodPkts"), + MIB_DESC(1, 0x06, "TxUnicastPkts"), + MIB_DESC(1, 0x07, "TxMulticastPkts"), + MIB_DESC(1, 0x00, "Tx64BytePkts"), + MIB_DESC(1, 0x01, "Tx127BytePkts"), + MIB_DESC(1, 0x02, "Tx255BytePkts"), + MIB_DESC(1, 0x03, "Tx511BytePkts"), + MIB_DESC(1, 0x04, "Tx1023BytePkts"), + /** Transmit Size 1024-1522 (or more, if configured) Packet Count. 
*/ + MIB_DESC(1, 0x05, "TxMaxBytePkts"), + MIB_DESC(1, 0x08, "TxSingleCollCount"), + MIB_DESC(1, 0x09, "TxMultCollCount"), + MIB_DESC(1, 0x0A, "TxLateCollCount"), + MIB_DESC(1, 0x0B, "TxExcessCollCount"), + MIB_DESC(1, 0x0D, "TxPauseCount"), + MIB_DESC(1, 0x10, "TxDroppedPkts"), + MIB_DESC(2, 0x0E, "TxGoodBytes"), +}; + +static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, + u32 cleared) +{ + u32 val; + + return regmap_read_poll_timeout(priv->gswip, offset, val, + !(val & cleared), 20, 50000); +} + +static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set, + int port) +{ + int reg_port; + + /* MII_CFG register only exists for MII ports */ + if (!(priv->hw_info->mii_ports & BIT(port))) + return; + + reg_port = port + priv->hw_info->mii_port_reg_offset; + + regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask, + set); +} + +static int gswip_mdio_poll(struct gswip_priv *priv) +{ + u32 ctrl; + + return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl, + !(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000); +} + +static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val); + regmap_write(priv->mdio, GSWIP_MDIO_CTRL, + GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK)); + + return 0; +} + +static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) +{ + struct gswip_priv *priv = bus->priv; + u32 val; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + regmap_write(priv->mdio, GSWIP_MDIO_CTRL, + GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK)); + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val); + if (err) + return err; + + return val; +} + +static int gswip_mdio(struct gswip_priv *priv) +{ + struct device_node *mdio_np, *switch_np = priv->dev->of_node; + struct device *dev = priv->dev; + struct mii_bus *bus; + int err = 0; + + mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio"); + if (!mdio_np) + mdio_np = of_get_child_by_name(switch_np, "mdio"); + + if (!of_device_is_available(mdio_np)) + goto out_put_node; + + bus = devm_mdiobus_alloc(dev); + if (!bus) { + err = -ENOMEM; + goto out_put_node; + } + + bus->priv = priv; + bus->read = gswip_mdio_rd; + bus->write = gswip_mdio_wr; + bus->name = "lantiq,xrx200-mdio"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); + bus->parent = priv->dev; + + err = devm_of_mdiobus_register(dev, bus, mdio_np); + +out_put_node: + of_node_put(mdio_np); + + return err; +} + +static int gswip_pce_table_entry_read(struct gswip_priv *priv, + struct gswip_pce_table_entry *tbl) +{ + int i; + int err; + u32 crtl; + u32 tmp; + u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+					  GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+	mutex_lock(&priv->pce_table_lock);
+
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+				     GSWIP_PCE_TBL_CTRL_BAS);
+	if (err)
+		goto out_unlock;
+
+	regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+			  GSWIP_PCE_TBL_CTRL_BAS,
+			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);
+
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+				     GSWIP_PCE_TBL_CTRL_BAS);
+	if (err)
+		goto out_unlock;
+
+	for (i = 0; i < ARRAY_SIZE(tbl->key); i++) {
+		err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp);
+		if (err)
+			goto out_unlock;
+		tbl->key[i] = tmp;
+	}
+	for (i = 0; i < ARRAY_SIZE(tbl->val); i++) {
+		err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp);
+		if (err)
+			goto out_unlock;
+		tbl->val[i] = tmp;
+	}
+
+	err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp);
+	if (err)
+		goto out_unlock;
+
+	tbl->mask = tmp;
+	err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+	if (err)
+		goto out_unlock;
+
+	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+out_unlock:
+	mutex_unlock(&priv->pce_table_lock);
+
+	return err;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+				       struct gswip_pce_table_entry *tbl)
+{
+	int i;
+	int err;
+	u32 crtl;
+	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+	mutex_lock(&priv->pce_table_lock);
+
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+				     GSWIP_PCE_TBL_CTRL_BAS);
+	if (err) {
+		mutex_unlock(&priv->pce_table_lock);
+		return err;
+	}
+
+	regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+			  tbl->table | addr_mode);
+
+	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]);
+
+	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]);
+
+	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+			  tbl->table | addr_mode);
+
+	regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask);
+
+	regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+	if (tbl->type)
+		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+	if (tbl->valid)
+		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+	regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, crtl);
+
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+				     GSWIP_PCE_TBL_CTRL_BAS);
+
+	mutex_unlock(&priv->pce_table_lock);
+
+	return err;
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
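+ *
+ * A worked example, following the code below rather than any hardware
+ * documentation, assuming user port 2 and the CPU port at index 6:
+ *
+ *	vlan_active:  index = 3, key[0] = 0 (the unaware PVID), val[0] = 3 (fid)
+ *	vlan_mapping: index = 3, val[1] = BIT(2) | BIT(6), val[2] = 0
+ *
+ * so while unbridged, port 2 can only ever exchange frames with the CPU
+ * port, and all of its egress traffic stays untagged.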
+ */ +static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) +{ + struct gswip_pce_table_entry vlan_active = {0,}; + struct gswip_pce_table_entry vlan_mapping = {0,}; + int err; + + vlan_active.index = port + 1; + vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; + vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID; + vlan_active.val[0] = port + 1 /* fid */; + vlan_active.valid = add; + err = gswip_pce_table_entry_write(priv, &vlan_active); + if (err) { + dev_err(priv->dev, "failed to write active VLAN: %d\n", err); + return err; + } + + if (!add) + return 0; + + vlan_mapping.index = port + 1; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID; + vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds); + vlan_mapping.val[2] = 0; + err = gswip_pce_table_entry_write(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); + return err; + } + + return 0; +} + +static int gswip_port_set_learning(struct gswip_priv *priv, int port, + bool enable) +{ + if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + return -EOPNOTSUPP; + + /* learning disable bit */ + return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port), + GSWIP_PCE_PCTRL_3_LNDIS, + enable ? 0 : GSWIP_PCE_PCTRL_3_LNDIS); +} + +static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct gswip_priv *priv = ds->priv; + unsigned long supported = 0; + + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + supported |= BR_LEARNING; + + if (flags.mask & ~supported) + return -EINVAL; + + return 0; +} + +static int gswip_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct gswip_priv *priv = ds->priv; + + if (flags.mask & BR_LEARNING) + return gswip_port_set_learning(priv, port, + !!(flags.val & BR_LEARNING)); + + return 0; +} + +static int gswip_port_setup(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + int err; + + if (!dsa_is_cpu_port(ds, port)) { + err = gswip_add_single_port_br(priv, port, true); + if (err) + return err; + } + + return 0; +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + u32 mdio_phy = 0; + + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_ADDR_MASK, + mdio_phy); + } + + /* RMON Counter Enable for port */ + regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN); + + /* enable port fetch/store dma & VLAN Modification */ + regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port), + GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH); + regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + + return 0; +} + +static void gswip_port_disable(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + + regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port), + GSWIP_FDMA_PCTRL_EN); + regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); +} + +static int gswip_pce_load_microcode(struct gswip_priv *priv) +{ + int i; + int err; + + regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR); + 
regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0);
+
+	for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i);
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0),
+			     (*priv->hw_info->pce_microcode)[i].val_0);
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1),
+			     (*priv->hw_info->pce_microcode)[i].val_1);
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2),
+			     (*priv->hw_info->pce_microcode)[i].val_2);
+		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3),
+			     (*priv->hw_info->pce_microcode)[i].val_3);
+
+		/* start the table access: */
+		regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+				GSWIP_PCE_TBL_CTRL_BAS);
+		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+					     GSWIP_PCE_TBL_CTRL_BAS);
+		if (err)
+			return err;
+	}
+
+	/* tell the switch that the microcode is loaded */
+	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+			GSWIP_PCE_GCTRL_0_MC_VALID);
+
+	return 0;
+}
+
+static void gswip_port_commit_pvid(struct gswip_priv *priv, int port)
+{
+	struct dsa_port *dp = dsa_to_port(priv->ds, port);
+	struct net_device *br = dsa_port_bridge_dev_get(dp);
+	u32 vinr;
+	int idx;
+
+	if (!dsa_port_is_user(dp))
+		return;
+
+	if (br) {
+		u16 pvid = GSWIP_VLAN_UNAWARE_PVID;
+
+		if (br_vlan_enabled(br))
+			br_vlan_get_pvid(br, &pvid);
+
+		/* VLAN-aware bridge ports with no PVID will use Active VLAN
+		 * index 0. The expectation is that this drops all untagged and
+		 * VID-0 tagged ingress traffic.
+		 */
+		idx = 0;
+		for (int i = priv->hw_info->max_ports;
+		     i < ARRAY_SIZE(priv->vlans); i++) {
+			if (priv->vlans[i].bridge == br &&
+			    priv->vlans[i].vid == pvid) {
+				idx = i;
+				break;
+			}
+		}
+	} else {
+		/* The Active VLAN table index as configured by
+		 * gswip_add_single_port_br()
+		 */
+		idx = port + 1;
+	}
+
+	vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED;
+	regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+			  GSWIP_PCE_VCTRL_VINR,
+			  FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr));
+
+	/* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed
+	 * directly instead of referencing the index in the Active VLAN Table.
+	 * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457) even
+	 * GSWIP 2.2 and newer hardware maintains the GSWIP 2.1 behavior.
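+	 *
+	 * Nothing in this file sets VLANMD, so DEFPVID is always written
+	 * with a table index here:
+	 *
+	 *	standalone port p:          idx = p + 1 (its single-port bridge)
+	 *	bridged port with a PVID:   idx of the matching Active VLAN entry
+	 *	VLAN-aware bridge, no PVID: idx = 0 (untagged ingress is dropped)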
+	 */
+	regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx);
+}
+
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+				     bool vlan_filtering,
+				     struct netlink_ext_ack *extack)
+{
+	struct gswip_priv *priv = ds->priv;
+
+	if (vlan_filtering) {
+		/* Use tag based VLAN */
+		regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+				  GSWIP_PCE_VCTRL_VSR |
+				  GSWIP_PCE_VCTRL_UVR |
+				  GSWIP_PCE_VCTRL_VIMR |
+				  GSWIP_PCE_VCTRL_VEMR |
+				  GSWIP_PCE_VCTRL_VID0,
+				  GSWIP_PCE_VCTRL_UVR |
+				  GSWIP_PCE_VCTRL_VIMR |
+				  GSWIP_PCE_VCTRL_VEMR |
+				  GSWIP_PCE_VCTRL_VID0);
+		regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+				  GSWIP_PCE_PCTRL_0_TVM);
+	} else {
+		/* Use port based VLAN */
+		regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+				  GSWIP_PCE_VCTRL_UVR |
+				  GSWIP_PCE_VCTRL_VIMR |
+				  GSWIP_PCE_VCTRL_VEMR |
+				  GSWIP_PCE_VCTRL_VID0 |
+				  GSWIP_PCE_VCTRL_VSR,
+				  GSWIP_PCE_VCTRL_VSR);
+		regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+				GSWIP_PCE_PCTRL_0_TVM);
+	}
+
+	gswip_port_commit_pvid(priv, port);
+
+	return 0;
+}
+
+static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
+				  phy_interface_t interface)
+{
+	u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT;
+	u32 rx_delay = GSWIP_MII_PCDU_RXDLY_DEFAULT;
+	struct device_node *port_dn = dp->dn;
+	u16 mii_pcdu_reg;
+
+	/* As MII_PCDU registers only exist for MII ports, silently return
+	 * unless the port is an MII port
+	 */
+	if (!(priv->hw_info->mii_ports & BIT(dp->index)))
+		return;
+
+	switch (dp->index + priv->hw_info->mii_port_reg_offset) {
+	case 0:
+		mii_pcdu_reg = GSWIP_MII_PCDU0;
+		break;
+	case 1:
+		mii_pcdu_reg = GSWIP_MII_PCDU1;
+		break;
+	case 5:
+		mii_pcdu_reg = GSWIP_MII_PCDU5;
+		break;
+	default:
+		return;
+	}
+
+	/* legacy code to set default delays according to the interface mode */
+	switch (interface) {
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		tx_delay = 0;
+		rx_delay = 0;
+		break;
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+		rx_delay = 0;
+		break;
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		tx_delay = 0;
+		break;
+	default:
+		break;
+	}
+
+	/* allow setting delays using device tree properties */
+	of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+	of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+	regmap_write_bits(priv->mii, mii_pcdu_reg,
+			  GSWIP_MII_PCDU_TXDLY_MASK |
+			  GSWIP_MII_PCDU_RXDLY_MASK,
+			  GSWIP_MII_PCDU_TXDLY(tx_delay) |
+			  GSWIP_MII_PCDU_RXDLY(rx_delay));
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+	unsigned int cpu_ports = dsa_cpu_ports(ds);
+	struct gswip_priv *priv = ds->priv;
+	struct dsa_port *cpu_dp;
+	int err, i;
+
+	regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0);
+	usleep_range(5000, 10000);
+	regmap_write(priv->gswip, GSWIP_SWRES, 0);
+
+	/* disable port fetch/store dma on all ports */
+	for (i = 0; i < priv->hw_info->max_ports; i++) {
+		gswip_port_disable(ds, i);
+		gswip_port_vlan_filtering(ds, i, false, NULL);
+	}
+
+	/* enable Switch */
+	regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+
+	err = gswip_pce_load_microcode(priv);
+	if (err) {
+		dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
+		return err;
+	}
+
+	/* Default unknown Broadcast/Multicast/Unicast port maps */
+	regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports);
+	regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports);
+	regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports);
+
+	/* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have
+	 * an interoperability problem with this auto polling mechanism because
+	 * their status registers think that the link is in a different state
+	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
+	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+	 * auto polling state machine consider the link being negotiated with
+	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+	 * to the switch port being completely dead (RX and TX are both not
+	 * working).
+	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
+	 * it would work fine for a few minutes to hours and then stop; on
+	 * other devices no traffic could be sent or received at all.
+	 * Testing shows that when PHY auto polling is disabled these problems
+	 * go away.
+	 */
+	regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0);
+
+	/* Configure the MDIO clock to 2.5 MHz */
+	regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09);
+
+	/* bring up the mdio bus */
+	err = gswip_mdio(priv);
+	if (err) {
+		dev_err(priv->dev, "mdio bus setup failed\n");
+		return err;
+	}
+
+	/* Disable the xMII interface and clear its isolation bit */
+	for (i = 0; i < priv->hw_info->max_ports; i++)
+		gswip_mii_mask_cfg(priv,
+				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+				   0, i);
+
+	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+		/* enable special tag insertion on cpu port */
+		regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index),
+				GSWIP_FDMA_PCTRL_STEN);
+
+		/* accept special tag in ingress direction */
+		regmap_set_bits(priv->gswip,
+				GSWIP_PCE_PCTRL_0p(cpu_dp->index),
+				GSWIP_PCE_PCTRL_0_INGRESS);
+	}
+
+	regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL,
+			GSWIP_BM_QUEUE_GCTRL_GL_MOD);
+
+	/* VLAN aware Switching */
+	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+			GSWIP_PCE_GCTRL_0_VLAN);
+
+	/* Flush MAC Table */
+	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+			GSWIP_PCE_GCTRL_0_MTFL);
+
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+				     GSWIP_PCE_GCTRL_0_MTFL);
+	if (err) {
+		dev_err(priv->dev, "MAC flushing didn't finish\n");
+		return err;
+	}
+
+	ds->mtu_enforcement_ingress = true;
+
+	return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+						    int port,
+						    enum dsa_tag_protocol mp)
+{
+	struct gswip_priv *priv = ds->priv;
+
+	return priv->hw_info->tag_protocol;
+}
+
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+				    struct net_device *bridge,
+				    int fid, u16 vid)
+{
+	struct gswip_pce_table_entry vlan_active = {0,};
+	unsigned int max_ports = priv->hw_info->max_ports;
+	int idx = -1;
+	int err;
+	int i;
+
+	/* Look for a free slot */
+	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+		if (!priv->vlans[i].bridge) {
+			idx = i;
+			break;
+		}
+	}
+
+	if (idx == -1)
+		return -ENOSPC;
+
+	if (fid == -1)
+		fid = idx;
+
+	vlan_active.index = idx;
+	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+	vlan_active.key[0] = vid;
+	vlan_active.val[0] = fid;
+	vlan_active.valid = true;
+
+	err = gswip_pce_table_entry_write(priv, &vlan_active);
+	if (err) {
+		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+		return err;
+	}
+
+	priv->vlans[idx].bridge = bridge;
+	priv->vlans[idx].vid = vid;
+	priv->vlans[idx].fid = fid;
+
+	return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+	struct gswip_pce_table_entry vlan_active = {0,};
+	int err;
+
+	vlan_active.index = idx;
+	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+	vlan_active.valid = false;
+	err = gswip_pce_table_entry_write(priv, &vlan_active);
+	if (err)
+		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+	priv->vlans[idx].bridge = NULL;
+
+	return err;
+}
+
+static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge,
+			  int port, u16 vid, bool untagged, bool pvid,
+			  bool vlan_aware)
+{
+	struct gswip_pce_table_entry vlan_mapping = {0,};
+	unsigned int max_ports = priv->hw_info->max_ports;
+	unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
+	bool active_vlan_created = false;
+	int fid = -1, idx = -1;
+	int i, err;
+
+	/* Check if there is already a page for this bridge */
+	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+		if (priv->vlans[i].bridge == bridge) {
+			if (vlan_aware) {
+				if (fid != -1 && fid != priv->vlans[i].fid)
+					dev_err(priv->dev, "one bridge with multiple flow ids\n");
+				fid = priv->vlans[i].fid;
+			}
+			if (priv->vlans[i].vid == vid) {
+				idx = i;
+				break;
+			}
+		}
+	}
+
+	/* If this bridge is not programmed yet, add an Active VLAN table
+	 * entry in a free slot and prepare the VLAN mapping table entry.
+	 */
+	if (idx == -1) {
+		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+		if (idx < 0)
+			return idx;
+		active_vlan_created = true;
+
+		vlan_mapping.index = idx;
+		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+	} else {
+		/* Read the existing VLAN mapping entry from the switch */
+		vlan_mapping.index = idx;
+		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+		if (err) {
+			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+				err);
+			return err;
+		}
+	}
+
+	/* VLAN ID byte, maps to the VLAN ID of vlan active table */
+	vlan_mapping.val[0] = vid;
+	/* Update the VLAN mapping entry and write it to the switch */
+	vlan_mapping.val[1] |= cpu_ports;
+	vlan_mapping.val[1] |= BIT(port);
+	if (vlan_aware)
+		vlan_mapping.val[2] |= cpu_ports;
+	if (untagged)
+		vlan_mapping.val[2] &= ~BIT(port);
+	else
+		vlan_mapping.val[2] |= BIT(port);
+	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+	if (err) {
+		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+		/* In case an Active VLAN was created, delete it again */
+		if (active_vlan_created)
+			gswip_vlan_active_remove(priv, idx);
+		return err;
+	}
+
+	gswip_port_commit_pvid(priv, port);
+
+	return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+			     struct net_device *bridge, int port,
+			     u16 vid)
+{
+	struct gswip_pce_table_entry vlan_mapping = {0,};
+	unsigned int max_ports = priv->hw_info->max_ports;
+	int idx = -1;
+	int i;
+	int err;
+
+	/* Check if there is already a page for this bridge */
+	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+		if (priv->vlans[i].bridge == bridge &&
+		    priv->vlans[i].vid == vid) {
+			idx = i;
+			break;
+		}
+	}
+
+	if (idx == -1) {
+		dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n",
+			port, vid, bridge ? 
bridge->name : "(null)"); + return -ENOENT; + } + + vlan_mapping.index = idx; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + err = gswip_pce_table_entry_read(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err); + return err; + } + + vlan_mapping.val[1] &= ~BIT(port); + vlan_mapping.val[2] &= ~BIT(port); + err = gswip_pce_table_entry_write(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); + return err; + } + + /* In case all ports are removed from the bridge, remove the VLAN */ + if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) { + err = gswip_vlan_active_remove(priv, idx); + if (err) { + dev_err(priv->dev, "failed to write active VLAN: %d\n", + err); + return err; + } + } + + gswip_port_commit_pvid(priv, port); + + return 0; +} + +static int gswip_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct net_device *br = bridge.dev; + struct gswip_priv *priv = ds->priv; + int err; + + /* Set up the VLAN for VLAN-unaware bridging for this port, and remove + * it from the "single-port bridge" through which it was operating as + * standalone. + */ + err = gswip_vlan_add(priv, br, port, GSWIP_VLAN_UNAWARE_PVID, + true, true, false); + if (err) + return err; + + return gswip_add_single_port_br(priv, port, false); +} + +static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct net_device *br = bridge.dev; + struct gswip_priv *priv = ds->priv; + + /* Add the port back to the "single-port bridge", and remove it from + * the VLAN-unaware PVID created for this bridge. + */ + gswip_add_single_port_br(priv, port, true); + gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID); +} + +static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + unsigned int max_ports = priv->hw_info->max_ports; + int pos = max_ports; + int i, idx = -1; + + /* We only support VLAN filtering on bridges */ + if (!dsa_is_cpu_port(ds, port) && !bridge) + return -EOPNOTSUPP; + + /* Check if there is already a page for this VLAN */ + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge && + priv->vlans[i].vid == vlan->vid) { + idx = i; + break; + } + } + + /* If this VLAN is not programmed yet, we have to reserve + * one entry in the VLAN table. Make sure a subsequent search + * continues at the next position. 
+ */ + if (idx == -1) { + /* Look for a free slot */ + for (; pos < ARRAY_SIZE(priv->vlans); pos++) { + if (!priv->vlans[pos].bridge) { + idx = pos; + pos++; + break; + } + } + + if (idx == -1) { + NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table"); + return -ENOSPC; + } + } + + return 0; +} + +static int gswip_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + int err; + + if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID) + return 0; + + err = gswip_port_vlan_prepare(ds, port, vlan, extack); + if (err) + return err; + + /* We have to receive all packets on the CPU port and should not + * do any VLAN filtering here. This is also called with bridge + * NULL and then we do not know for which bridge to configure + * this. + */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid, + true); +} + +static int gswip_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + + if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID) + return 0; + + /* We have to receive all packets on the CPU port and should not + * do any VLAN filtering here. This is also called with bridge + * NULL and then we do not know for which bridge to configure + * this. + */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + return gswip_vlan_remove(priv, bridge, port, vlan->vid); +} + +static void gswip_port_fast_age(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + int i; + int err; + + for (i = 0; i < 2048; i++) { + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.index = i; + + err = gswip_pce_table_entry_read(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, "failed to read mac bridge: %d\n", + err); + return; + } + + if (!mac_bridge.valid) + continue; + + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) + continue; + + if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) + continue; + + mac_bridge.valid = false; + err = gswip_pce_table_entry_write(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, "failed to write mac bridge: %d\n", + err); + return; + } + } +} + +static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct gswip_priv *priv = ds->priv; + u32 stp_state; + + switch (state) { + case BR_STATE_DISABLED: + regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + return; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN; + break; + case BR_STATE_LEARNING: + stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING; + break; + case BR_STATE_FORWARDING: + stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING; + break; + default: + dev_err(priv->dev, "invalid STP state: %d\n", state); + return; + } + + regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port), + GSWIP_PCE_PCTRL_0_PSTATE_MASK, + stp_state); +} + +static int gswip_port_fdb(struct dsa_switch *ds, int port, + struct net_device *bridge, const unsigned char *addr, + u16 
vid, bool add) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + unsigned int max_ports = priv->hw_info->max_ports; + int fid = -1; + int i; + int err; + + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge) { + fid = priv->vlans[i].fid; + break; + } + } + + if (fid == -1) { + dev_err(priv->dev, "no FID found for bridge %s\n", + bridge->name); + return -EINVAL; + } + + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.key_mode = true; + mac_bridge.key[0] = addr[5] | (addr[4] << 8); + mac_bridge.key[1] = addr[3] | (addr[2] << 8); + mac_bridge.key[2] = addr[1] | (addr[0] << 8); + mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid); + mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */ + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC)) + mac_bridge.val[1] = add ? (GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC | + GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0; + else + mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC; + + mac_bridge.valid = add; + + err = gswip_pce_table_entry_write(priv, &mac_bridge); + if (err) + dev_err(priv->dev, "failed to write mac bridge: %d\n", err); + + return err; +} + +static int gswip_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + if (db.type != DSA_DB_BRIDGE) + return -EOPNOTSUPP; + + return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true); +} + +static int gswip_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + if (db.type != DSA_DB_BRIDGE) + return -EOPNOTSUPP; + + return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false); +} + +static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + unsigned char addr[ETH_ALEN]; + int i; + int err; + + for (i = 0; i < 2048; i++) { + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.index = i; + + err = gswip_pce_table_entry_read(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, + "failed to read mac bridge entry %d: %d\n", + i, err); + return err; + } + + if (!mac_bridge.valid) + continue; + + addr[5] = mac_bridge.key[0] & 0xff; + addr[4] = (mac_bridge.key[0] >> 8) & 0xff; + addr[3] = mac_bridge.key[1] & 0xff; + addr[2] = (mac_bridge.key[1] >> 8) & 0xff; + addr[1] = mac_bridge.key[2] & 0xff; + addr[0] = (mac_bridge.key[2] >> 8) & 0xff; + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) { + if (mac_bridge.val[0] & BIT(port)) { + err = cb(addr, 0, true, data); + if (err) + return err; + } + } else { + if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) { + err = cb(addr, 0, false, data); + if (err) + return err; + } + } + } + return 0; +} + +static int gswip_port_max_mtu(struct dsa_switch *ds, int port) +{ + /* Includes 8 bytes for special header. */ + return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; +} + +static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct gswip_priv *priv = ds->priv; + + /* CPU port always has maximum mtu of user ports, so use it to set + * switch frame size, including 8 byte special header. 
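+ * The GSWIP_MAC_FLEN value written below thus covers the Ethernet + * header including VLAN tags, the MTU itself, the 8 byte special tag + * and the FCS.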
+ */ + if (dsa_is_cpu_port(ds, port)) { + new_mtu += 8; + regmap_write(priv->gswip, GSWIP_MAC_FLEN, + VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN); + } + + /* Enable MLEN for ports with non-standard MTUs, including the special + * header on the CPU port added above. + */ + if (new_mtu != ETH_DATA_LEN) + regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port), + GSWIP_MAC_CTRL_2_MLEN); + else + regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port), + GSWIP_MAC_CTRL_2_MLEN); + + return 0; +} + +static void gswip_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct gswip_priv *priv = ds->priv; + + priv->hw_info->phylink_get_caps(ds, port, config); +} + +static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +{ + u32 mdio_phy; + + if (link) + mdio_phy = GSWIP_MDIO_PHY_LINK_UP; + else + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_LINK_MASK, mdio_phy); +} + +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, + phy_interface_t interface) +{ + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; + + switch (speed) { + case SPEED_10: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M2P5; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_100: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M25; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_1000: + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; + + mii_cfg = GSWIP_MII_CFG_RATE_M125; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; + break; + } + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0); +} + +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) +{ + u32 mac_ctrl_0, mdio_phy; + + if (duplex == DUPLEX_FULL) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; + } + + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0); + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy); +} + +static void gswip_port_set_pause(struct gswip_priv *priv, int port, + bool tx_pause, bool rx_pause) +{ + u32 mac_ctrl_0, mdio_phy; + + if (tx_pause && rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_EN; + } else if (tx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_DIS; + } else if (rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_DIS; + } + + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0); + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK, + mdio_phy); +} + +static void gswip_phylink_mac_config(struct 
phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + u32 miicfg = 0; + + miicfg |= GSWIP_MII_CFG_LDCLKDIS; + + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_INTERNAL: + miicfg |= GSWIP_MII_CFG_MODE_MIIM; + break; + case PHY_INTERFACE_MODE_REVMII: + miicfg |= GSWIP_MII_CFG_MODE_MIIP; + break; + case PHY_INTERFACE_MODE_RMII: + miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out")) + miicfg |= GSWIP_MII_CFG_RMII_CLK; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + miicfg |= GSWIP_MII_CFG_MODE_RGMII; + break; + case PHY_INTERFACE_MODE_GMII: + miicfg |= GSWIP_MII_CFG_MODE_GMII; + break; + default: + dev_err(dp->ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | + GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, + miicfg, port); + + gswip_mii_delay_setup(priv, dp, state->interface); +} + +static void gswip_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); + + if (!dsa_port_is_cpu(dp)) + gswip_port_set_link(priv, port, false); +} + +static void gswip_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + + if (!dsa_port_is_cpu(dp) || interface != PHY_INTERFACE_MODE_INTERNAL) { + gswip_port_set_link(priv, port, true); + gswip_port_set_speed(priv, port, speed, interface); + gswip_port_set_duplex(priv, port, duplex); + gswip_port_set_pause(priv, port, tx_pause, rx_pause); + } + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port); +} + +static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) + ethtool_puts(&data, gswip_rmon_cnt[i].name); +} + +static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, + u32 index) +{ + u32 result, val; + int err; + + regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index); + regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD | + GSWIP_BM_RAM_CTRL_BAS, + table | GSWIP_BM_RAM_CTRL_BAS); + + err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_BAS); + if (err) { + dev_err(priv->dev, "timeout while reading table: %u, index: %u\n", + table, index); + return 0; + } + + regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result); + regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val); + result |= val << 16; + + return result; +} + +static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct gswip_priv *priv = ds->priv; + const struct 
gswip_rmon_cnt_desc *rmon_cnt; + int i; + u64 high; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { + rmon_cnt = &gswip_rmon_cnt[i]; + + data[i] = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset); + if (rmon_cnt->size == 2) { + high = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset + 1); + data[i] |= high << 32; + } + } +} + +static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(gswip_rmon_cnt); +} + +static int gswip_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_keee *e) +{ + if (e->tx_lpi_timer > 0x7f) + return -EINVAL; + + return 0; +} + +static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index), + GSWIP_MAC_CTRL_4_LPIEN); +} + +static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config, + u32 timer, bool tx_clock_stop) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index), + GSWIP_MAC_CTRL_4_LPIEN | + GSWIP_MAC_CTRL_4_GWAIT_MASK | + GSWIP_MAC_CTRL_4_WAIT_MASK, + GSWIP_MAC_CTRL_4_LPIEN | + GSWIP_MAC_CTRL_4_GWAIT(timer) | + GSWIP_MAC_CTRL_4_WAIT(timer)); +} + +static bool gswip_support_eee(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + return true; + + return false; +} + +static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + if (priv->hw_info->mac_select_pcs) + return priv->hw_info->mac_select_pcs(config, interface); + + return NULL; +} + +static const struct phylink_mac_ops gswip_phylink_mac_ops = { + .mac_config = gswip_phylink_mac_config, + .mac_link_down = gswip_phylink_mac_link_down, + .mac_link_up = gswip_phylink_mac_link_up, + .mac_disable_tx_lpi = gswip_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = gswip_phylink_mac_enable_tx_lpi, + .mac_select_pcs = gswip_phylink_mac_select_pcs, +}; + +static const struct dsa_switch_ops gswip_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_setup = gswip_port_setup, + .port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .port_pre_bridge_flags = gswip_port_pre_bridge_flags, + .port_bridge_flags = gswip_port_bridge_flags, + .port_bridge_join = gswip_port_bridge_join, + .port_bridge_leave = gswip_port_bridge_leave, + .port_fast_age = gswip_port_fast_age, + .port_vlan_filtering = gswip_port_vlan_filtering, + .port_vlan_add = gswip_port_vlan_add, + .port_vlan_del = gswip_port_vlan_del, + .port_stp_state_set = gswip_port_stp_state_set, + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, + .port_change_mtu = gswip_port_change_mtu, + .port_max_mtu = gswip_port_max_mtu, + .phylink_get_caps = gswip_phylink_get_caps, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, + .set_mac_eee = gswip_set_mac_eee, + .support_eee = gswip_support_eee, +}; + +void gswip_disable_switch(struct gswip_priv *priv) +{ + regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE); +} 
+EXPORT_SYMBOL_GPL(gswip_disable_switch); + +static int gswip_validate_cpu_port(struct dsa_switch *ds) +{ + struct gswip_priv *priv = ds->priv; + struct dsa_port *cpu_dp; + int cpu_port = -1; + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + if (cpu_port != -1) + return dev_err_probe(ds->dev, -EINVAL, + "only a single CPU port is supported\n"); + + cpu_port = cpu_dp->index; + } + + if (cpu_port == -1) + return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n"); + + if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports) + return dev_err_probe(ds->dev, -EINVAL, + "unsupported CPU port defined\n"); + + return 0; +} + +int gswip_probe_common(struct gswip_priv *priv, u32 version) +{ + int err; + + mutex_init(&priv->pce_table_lock); + + priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL); + if (!priv->ds) + return -ENOMEM; + + priv->ds->dev = priv->dev; + priv->ds->num_ports = priv->hw_info->max_ports; + priv->ds->ops = &gswip_switch_ops; + priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops; + priv->ds->priv = priv; + + /* The hardware has the 'major/minor' version bytes in the wrong order, + * preventing numerical comparisons. Construct a 16-bit unsigned integer + * with the REV field as the most significant byte and the MOD field as + * the least significant byte. This effectively swaps the two bytes of + * the version value but, unlike swab16(), leaves the source variable + * untouched. + */ + priv->version = GSWIP_VERSION_REV(version) << 8 | + GSWIP_VERSION_MOD(version); + + err = dsa_register_switch(priv->ds); + if (err) + return dev_err_probe(priv->dev, err, "dsa switch registration failed\n"); + + err = gswip_validate_cpu_port(priv->ds); + if (err) + goto disable_switch; + + dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n", + GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version)); + + return 0; + +disable_switch: + gswip_disable_switch(priv); + dsa_unregister_switch(priv->ds); + + return err; +} +EXPORT_SYMBOL_GPL(gswip_probe_common); + +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c new file mode 100644 index 000000000000..0816c61a47f1 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* DSA Driver for MaxLinear GSW1xx switch devices + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. + * Copyright (C) 2022 Snap One, LLC. All rights reserved. 
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland + */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/regmap.h> +#include <net/dsa.h> + +#include "lantiq_gswip.h" +#include "mxl-gsw1xx.h" +#include "mxl-gsw1xx_pce.h" + +struct gsw1xx_priv { + struct mdio_device *mdio_dev; + int smdio_badr; + struct regmap *sgmii; + struct regmap *gpio; + struct regmap *clk; + struct regmap *shell; + struct phylink_pcs pcs; + phy_interface_t tbi_interface; + struct gswip_priv gswip; +}; + +static int gsw1xx_config_smdio_badr(struct gsw1xx_priv *priv, + unsigned int reg) +{ + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr = priv->smdio_badr; + int res; + + if (smdio_badr == GSW1XX_SMDIO_BADR_UNKNOWN || + reg - smdio_badr >= GSW1XX_SMDIO_BADR || + smdio_badr > reg) { + /* Configure the Switch Base Address */ + smdio_badr = reg & ~GENMASK(3, 0); + res = __mdiobus_write(bus, sw_addr, GSW1XX_SMDIO_BADR, smdio_badr); + if (res < 0) { + dev_err(&priv->mdio_dev->dev, + "%s: Error %d, configuring switch base\n", + __func__, res); + return res; + } + priv->smdio_badr = smdio_badr; + } + + return smdio_badr; +} + +static int gsw1xx_regmap_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct gsw1xx_priv *priv = context; + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr; + int res; + + smdio_badr = gsw1xx_config_smdio_badr(priv, reg); + if (smdio_badr < 0) + return smdio_badr; + + res = __mdiobus_read(bus, sw_addr, reg - smdio_badr); + if (res < 0) { + dev_err(&priv->mdio_dev->dev, "%s: Error %d reading 0x%x\n", + __func__, res, reg); + return res; + } + + *val = res; + + return 0; +} + +static int gsw1xx_regmap_write(void *context, unsigned int reg, + unsigned int val) +{ + struct gsw1xx_priv *priv = context; + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr; + int res; + + smdio_badr = gsw1xx_config_smdio_badr(priv, reg); + if (smdio_badr < 0) + return smdio_badr; + + res = __mdiobus_write(bus, sw_addr, reg - smdio_badr, val); + if (res < 0) + dev_err(&priv->mdio_dev->dev, + "%s: Error %d, writing 0x%x:0x%x\n", __func__, res, reg, + val); + + return res; +} + +static const struct regmap_bus gsw1xx_regmap_bus = { + .reg_write = gsw1xx_regmap_write, + .reg_read = gsw1xx_regmap_read, +}; + +static void gsw1xx_mdio_regmap_lock(void *mdio_lock) +{ + mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED); +} + +static void gsw1xx_mdio_regmap_unlock(void *mdio_lock) +{ + mutex_unlock(mdio_lock); +} + +static unsigned int gsw1xx_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; +} + +static struct gsw1xx_priv *pcs_to_gsw1xx(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct gsw1xx_priv, pcs); +} + +static int gsw1xx_pcs_enable(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + /* Deassert SGMII shell reset */ + return regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); +} + +static void gsw1xx_pcs_disable(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + /* Assert SGMII shell reset */ + regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + + 
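/* Forget the configured interface mode so that a subsequent call of + * gsw1xx_pcs_config() performs a full gsw1xx_pcs_reset() instead of + * taking the shortcut for an already configured mode. + */ + 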
priv->tbi_interface = PHY_INTERFACE_MODE_NA; +} + +static void gsw1xx_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, + struct phylink_link_state *state) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + int ret; + u32 val; + + ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_TBISTAT, &val); + if (ret < 0) + return; + + state->link = !!(val & GSW1XX_SGMII_TBI_TBISTAT_LINK); + state->an_complete = !!(val & GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE); + + ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, &val); + if (ret < 0) + return; + + state->duplex = (val & GSW1XX_SGMII_TBI_LPSTAT_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX) + state->pause |= MLO_PAUSE_RX; + + if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX) + state->pause |= MLO_PAUSE_TX; + + switch (FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, val)) { + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_10: + state->speed = SPEED_10; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_100: + state->speed = SPEED_100; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000: + state->speed = SPEED_1000; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII: + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->speed = SPEED_1000; + else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_UNKNOWN; + break; + } +} + +static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr, + u16 data) +{ + int ret, val; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_D, data); + if (ret < 0) + return ret; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_A, addr); + if (ret < 0) + return ret; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_C, + GSW1XX_SGMII_PHY_WRITE | + GSW1XX_SGMII_PHY_RESET_N); + if (ret < 0) + return ret; + + return regmap_read_poll_timeout(priv->sgmii, GSW1XX_SGMII_PHY_C, + val, val & GSW1XX_SGMII_PHY_STATUS, + 1000, 100000); +} + +static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv) +{ + int ret; + u16 val; + + /* Assert and deassert SGMII shell reset */ + ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + ret = regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + /* Hardware Bringup FSM Enable */ + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_HWBU_CTRL, + GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM | + GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN); + if (ret < 0) + return ret; + + /* Configure SGMII PHY Receiver */ + val = FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_EQ, + GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF) | + GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN | + GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN | + FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT, + GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF); + + /* TODO: Take care of inverted RX pair once generic property is + * available + */ + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val); + if (ret < 0) + return ret; + + val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL, + GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF); + + /* TODO: Take care of inverted TX pair once generic property is + * available + */ + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val); + if (ret < 0) + return ret; + + /* Reset and Release TBI */ + val = GSW1XX_SGMII_TBI_TBICTL_INITTBI | GSW1XX_SGMII_TBI_TBICTL_ENTBI | + GSW1XX_SGMII_TBI_TBICTL_CRSTRR | GSW1XX_SGMII_TBI_TBICTL_CRSOFF; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val); + if (ret < 0) + 
return ret; + val &= ~GSW1XX_SGMII_TBI_TBICTL_INITTBI; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val); + if (ret < 0) + return ret; + + /* Release Tx Data Buffers */ + ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL, + GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB); + if (ret < 0) + return ret; + ret = regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL, + GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB); + if (ret < 0) + return ret; + + /* Release Rx Data Buffers */ + ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL, + GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB); + if (ret < 0) + return ret; + return regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL, + GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB); +} + +static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + u16 txaneg, anegctl, nco_ctrl; + bool reconf = false; + int ret = 0; + + /* do not unnecessarily disrupt link and skip resetting the hardware in + * case the PCS has previously been successfully configured for this + * interface mode + */ + if (priv->tbi_interface == interface) + reconf = true; + + /* mark PCS configuration as incomplete */ + priv->tbi_interface = PHY_INTERFACE_MODE_NA; + + if (!reconf) + ret = gsw1xx_pcs_reset(priv); + + if (ret) + return ret; + + /* override bootstrap pin settings + * OVRANEG sets ANEG Mode, Enable ANEG and restart ANEG to be + * taken from bits ANMODE, ANEGEN, RANEG of the ANEGCTL register. + * OVERABL sets ability bits in tx_config_reg to be taken from + * the TXANEGH and TXANEGL registers. + */ + anegctl = GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG | + GSW1XX_SGMII_TBI_ANEGCTL_OVRABL; + + switch (phylink_get_link_timer_ns(interface)) { + case 10000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_10US); + break; + case 1600000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS); + break; + case 5000000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS); + break; + case 10000000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS); + break; + default: + return -EINVAL; + } + + if (neg_mode & PHYLINK_PCS_NEG_INBAND) + anegctl |= GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN; + + txaneg = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* lacking a defined reverse-SGMII interface mode this + * driver only supports SGMII (MAC side) for now + */ + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE, + GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC); + txaneg |= ADVERTISE_LPACK; + } else if (interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE, + GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX); + } else { + dev_err(priv->gswip.dev, "%s: wrong interface mode %s\n", + __func__, phy_modes(interface)); + return -EINVAL; + } + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGH, + FIELD_GET(GENMASK(15, 8), txaneg)); + if (ret < 0) + return ret; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGL, + FIELD_GET(GENMASK(7, 0), txaneg)); + if (ret < 0) + return ret; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, anegctl); + if (ret < 0) + return ret; + + if (!reconf) { + /* setup SerDes clock speed */ + if (interface == 
PHY_INTERFACE_MODE_2500BASEX) + nco_ctrl = GSW1XX_SGMII_2G5 | GSW1XX_SGMII_2G5_NCO2; + else + nco_ctrl = GSW1XX_SGMII_1G | GSW1XX_SGMII_1G_NCO1; + + ret = regmap_update_bits(priv->clk, GSW1XX_CLK_NCO_CTRL, + GSW1XX_SGMII_HSP_MASK | + GSW1XX_SGMII_SEL, + nco_ctrl); + if (ret) + return ret; + + ret = gsw1xx_pcs_phy_xaui_write(priv, 0x30, 0x80); + if (ret) + return ret; + } + + /* PCS configuration has now been completed, store mode to prevent + * disrupting the link in case of future calls of this function for the + * same interface mode. + */ + priv->tbi_interface = interface; + + return 0; +} + +static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, + GSW1XX_SGMII_TBI_ANEGCTL_RANEG); +} + +static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, int speed, + int duplex) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + u16 lpstat; + + /* When in-band AN is enabled hardware will set lpstat */ + if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) + return; + + /* Force speed and duplex settings */ + if (interface == PHY_INTERFACE_MODE_SGMII) { + if (speed == SPEED_10) + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_10); + else if (speed == SPEED_100) + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_100); + else + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000); + } else { + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII); + } + + if (duplex == DUPLEX_FULL) + lpstat |= GSW1XX_SGMII_TBI_LPSTAT_DUPLEX; + + regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, lpstat); +} + +static const struct phylink_pcs_ops gsw1xx_pcs_ops = { + .pcs_inband_caps = gsw1xx_pcs_inband_caps, + .pcs_enable = gsw1xx_pcs_enable, + .pcs_disable = gsw1xx_pcs_disable, + .pcs_get_state = gsw1xx_pcs_get_state, + .pcs_config = gsw1xx_pcs_config, + .pcs_an_restart = gsw1xx_pcs_an_restart, + .pcs_link_up = gsw1xx_pcs_link_up, +}; + +static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct gswip_priv *priv = ds->priv; + + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + + switch (port) { + case 0: + case 1: + case 2: + case 3: + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + break; + case 4: /* port 4: SGMII */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + if (priv->hw_info->supports_2500m) { + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + config->mac_capabilities |= MAC_2500FD; + } + return; /* no support for EEE on SGMII port */ + case 5: /* port 5: RGMII or RMII */ + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); + phy_interface_set_rgmii(config->supported_interfaces); + break; + } + + config->lpi_capabilities = MAC_100FD | MAC_1000FD; + config->lpi_timer_default = 20; + memcpy(config->lpi_interfaces, config->supported_interfaces, + sizeof(config->lpi_interfaces)); +} + +static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *gswip_priv = dp->ds->priv; + struct gsw1xx_priv *gsw1xx_priv = 
container_of(gswip_priv, + struct gsw1xx_priv, + gswip); + + switch (dp->index) { + case GSW1XX_SGMII_PORT: + return &gsw1xx_priv->pcs; + default: + return NULL; + } +} + +static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv, + const char *name, + unsigned int reg_base, + unsigned int max_register) +{ + const struct regmap_config config = { + .name = name, + .reg_bits = 16, + .val_bits = 16, + .reg_base = reg_base, + .max_register = max_register, + .lock = gsw1xx_mdio_regmap_lock, + .unlock = gsw1xx_mdio_regmap_unlock, + .lock_arg = &priv->mdio_dev->bus->mdio_lock, + }; + + return devm_regmap_init(&priv->mdio_dev->dev, &gsw1xx_regmap_bus, + priv, &config); +} + +static int gsw1xx_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct gsw1xx_priv *priv; + u32 version; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->mdio_dev = mdiodev; + priv->smdio_badr = GSW1XX_SMDIO_BADR_UNKNOWN; + + priv->gswip.dev = dev; + priv->gswip.hw_info = of_device_get_match_data(dev); + if (!priv->gswip.hw_info) + return -EINVAL; + + priv->gswip.gswip = gsw1xx_regmap_init(priv, "switch", + GSW1XX_SWITCH_BASE, 0xfff); + if (IS_ERR(priv->gswip.gswip)) + return PTR_ERR(priv->gswip.gswip); + + priv->gswip.mdio = gsw1xx_regmap_init(priv, "mdio", GSW1XX_MMDIO_BASE, + 0xff); + if (IS_ERR(priv->gswip.mdio)) + return PTR_ERR(priv->gswip.mdio); + + priv->gswip.mii = gsw1xx_regmap_init(priv, "mii", GSW1XX_RGMII_BASE, + 0xff); + if (IS_ERR(priv->gswip.mii)) + return PTR_ERR(priv->gswip.mii); + + priv->sgmii = gsw1xx_regmap_init(priv, "sgmii", GSW1XX_SGMII_BASE, + 0xfff); + if (IS_ERR(priv->sgmii)) + return PTR_ERR(priv->sgmii); + + priv->gpio = gsw1xx_regmap_init(priv, "gpio", GSW1XX_GPIO_BASE, 0xff); + if (IS_ERR(priv->gpio)) + return PTR_ERR(priv->gpio); + + priv->clk = gsw1xx_regmap_init(priv, "clk", GSW1XX_CLK_BASE, 0xff); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->shell = gsw1xx_regmap_init(priv, "shell", GSW1XX_SHELL_BASE, + 0xff); + if (IS_ERR(priv->shell)) + return PTR_ERR(priv->shell); + + priv->pcs.ops = &gsw1xx_pcs_ops; + priv->pcs.poll = true; + __set_bit(PHY_INTERFACE_MODE_SGMII, + priv->pcs.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + priv->pcs.supported_interfaces); + if (priv->gswip.hw_info->supports_2500m) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + priv->pcs.supported_interfaces); + priv->tbi_interface = PHY_INTERFACE_MODE_NA; + + /* assert SGMII reset to power down SGMII unit */ + ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + /* configure GPIO pin-mux for MMDIO in case of external PHY connected to + * SGMII or RGMII as slave interface + */ + regmap_set_bits(priv->gpio, GPIO_ALTSEL0, 3); + regmap_set_bits(priv->gpio, GPIO_ALTSEL1, 3); + + ret = regmap_read(priv->gswip.gswip, GSWIP_VERSION, &version); + if (ret) + return ret; + + ret = gswip_probe_common(&priv->gswip, version); + if (ret) + return ret; + + dev_set_drvdata(dev, &priv->gswip); + + return 0; +} + +static void gsw1xx_remove(struct mdio_device *mdiodev) +{ + struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev); + + if (!priv) + return; + + gswip_disable_switch(priv); + + dsa_unregister_switch(priv->ds); +} + +static void gsw1xx_shutdown(struct mdio_device *mdiodev) +{ + struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev); + + if (!priv) + return; + + dev_set_drvdata(&mdiodev->dev, NULL); + + gswip_disable_switch(priv); 
+} + +static const struct gswip_hw_info gsw12x_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = &gsw1xx_phylink_get_caps, + .supports_2500m = true, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +static const struct gswip_hw_info gsw140_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = &gsw1xx_phylink_get_caps, + .supports_2500m = true, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +static const struct gswip_hw_info gsw141_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = gsw1xx_phylink_get_caps, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +/* + * GSW125 is the industrial temperature version of GSW120. + * GSW145 is the industrial temperature version of GSW140. + */ +static const struct of_device_id gsw1xx_of_match[] = { + { .compatible = "maxlinear,gsw120", .data = &gsw12x_data }, + { .compatible = "maxlinear,gsw125", .data = &gsw12x_data }, + { .compatible = "maxlinear,gsw140", .data = &gsw140_data }, + { .compatible = "maxlinear,gsw141", .data = &gsw141_data }, + { .compatible = "maxlinear,gsw145", .data = &gsw140_data }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, gsw1xx_of_match); + +static struct mdio_driver gsw1xx_driver = { + .probe = gsw1xx_probe, + .remove = gsw1xx_remove, + .shutdown = gsw1xx_shutdown, + .mdiodrv.driver = { + .name = "mxl-gsw1xx", + .of_match_table = gsw1xx_of_match, + }, +}; + +mdio_module_driver(gsw1xx_driver); + +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx ethernet switch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h new file mode 100644 index 000000000000..38e03c048a26 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Register definitions for MaxLinear GSW1xx series switches + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. 
+ */ +#ifndef __MXL_GSW1XX_H +#define __MXL_GSW1XX_H + +#include <linux/bitfield.h> + +#define GSW1XX_PORTS 6 +/* Port used for RGMII or optional RMII */ +#define GSW1XX_MII_PORT 5 +/* Port used for SGMII */ +#define GSW1XX_SGMII_PORT 4 + +#define GSW1XX_SYS_CLK_FREQ 340000000 + +/* SMDIO switch register base address */ +#define GSW1XX_SMDIO_BADR 0x1f +#define GSW1XX_SMDIO_BADR_UNKNOWN -1 + +/* GSW1XX SGMII PCS */ +#define GSW1XX_SGMII_BASE 0xd000 +#define GSW1XX_SGMII_PHY_HWBU_CTRL 0x009 +#define GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM BIT(0) +#define GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN BIT(3) +#define GSW1XX_SGMII_TBI_TXANEGH 0x300 +#define GSW1XX_SGMII_TBI_TXANEGL 0x301 +#define GSW1XX_SGMII_TBI_ANEGCTL 0x304 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT GENMASK(1, 0) +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10US 0 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS 1 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS 2 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS 3 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN BIT(2) +#define GSW1XX_SGMII_TBI_ANEGCTL_RANEG BIT(3) +#define GSW1XX_SGMII_TBI_ANEGCTL_OVRABL BIT(4) +#define GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG BIT(5) +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE GENMASK(7, 6) +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX 1 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_PHY 2 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC 3 +#define GSW1XX_SGMII_TBI_ANEGCTL_BCOMP BIT(15) + +#define GSW1XX_SGMII_TBI_TBICTL 0x305 +#define GSW1XX_SGMII_TBI_TBICTL_INITTBI BIT(0) +#define GSW1XX_SGMII_TBI_TBICTL_ENTBI BIT(1) +#define GSW1XX_SGMII_TBI_TBICTL_CRSTRR BIT(4) +#define GSW1XX_SGMII_TBI_TBICTL_CRSOFF BIT(5) +#define GSW1XX_SGMII_TBI_TBISTAT 0x309 +#define GSW1XX_SGMII_TBI_TBISTAT_LINK BIT(0) +#define GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE BIT(1) +#define GSW1XX_SGMII_TBI_LPSTAT 0x30a +#define GSW1XX_SGMII_TBI_LPSTAT_DUPLEX BIT(0) +#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX BIT(1) +#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX BIT(2) +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED GENMASK(6, 5) +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_10 0 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_100 1 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000 2 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII 3 +#define GSW1XX_SGMII_PHY_D 0x100 +#define GSW1XX_SGMII_PHY_A 0x101 +#define GSW1XX_SGMII_PHY_C 0x102 +#define GSW1XX_SGMII_PHY_STATUS BIT(0) +#define GSW1XX_SGMII_PHY_READ BIT(4) +#define GSW1XX_SGMII_PHY_WRITE BIT(8) +#define GSW1XX_SGMII_PHY_RESET_N BIT(12) +#define GSW1XX_SGMII_PCS_RXB_CTL 0x401 +#define GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB BIT(1) +#define GSW1XX_SGMII_PCS_TXB_CTL 0x404 +#define GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB BIT(1) + +#define GSW1XX_SGMII_PHY_RX0_CFG2 0x004 +#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ GENMASK(2, 0) +#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF 2 +#define GSW1XX_SGMII_PHY_RX0_CFG2_INVERT BIT(3) +#define GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN BIT(4) +#define GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN BIT(5) +#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT GENMASK(12, 6) +#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF 20 + +#define GSW1XX_SGMII_PHY_TX0_CFG3 0x007 +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_EN BIT(12) +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL GENMASK(11, 9) +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF 4 +#define GSW1XX_SGMII_PHY_TX0_CFG3_INVERT BIT(8) + +/* GSW1XX PDI Registers */ +#define GSW1XX_SWITCH_BASE 0xe000 + +/* GSW1XX MII Registers */ +#define GSW1XX_RGMII_BASE 0xf100 + +/* GSW1XX GPIO Registers */ +#define GSW1XX_GPIO_BASE 0xf300 +#define GPIO_ALTSEL0 0x83 +#define GPIO_ALTSEL0_EXTPHY_MUX_VAL 
0x03c3 +#define GPIO_ALTSEL1 0x84 +#define GPIO_ALTSEL1_EXTPHY_MUX_VAL 0x003f + +/* MDIO bus controller */ +#define GSW1XX_MMDIO_BASE 0xf400 + +/* generic IC registers */ +#define GSW1XX_SHELL_BASE 0xfa00 +#define GSW1XX_SHELL_RST_REQ 0x01 +#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5) +/* RGMII PAD Slew Control Register */ +#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78 +#define RGMII_SLEW_CFG_RX_2_5_V BIT(4) +#define RGMII_SLEW_CFG_TX_2_5_V BIT(5) + +/* SGMII clock related settings */ +#define GSW1XX_CLK_BASE 0xf900 +#define GSW1XX_CLK_NCO_CTRL 0x68 +#define GSW1XX_SGMII_HSP_MASK GENMASK(3, 2) +#define GSW1XX_SGMII_SEL BIT(1) +#define GSW1XX_SGMII_1G 0x0 +#define GSW1XX_SGMII_2G5 0xc +#define GSW1XX_SGMII_1G_NCO1 0x0 +#define GSW1XX_SGMII_2G5_NCO2 0x2 + +#endif /* __MXL_GSW1XX_H */ diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h new file mode 100644 index 000000000000..eefcd411a340 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCE microcode code update for driver for MaxLinear GSW1xx switch chips + * + * Copyright (C) 2023 - 2024 MaxLinear Inc. + * Copyright (C) 2022 Snap One, LLC. All rights reserved. + * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland + */ + +#include "lantiq_gswip.h" + +#define INSTR 0 +#define IPV6 1 +#define LENACCU 2 + +/* GSWIP_2.X */ +enum { + OUT_MAC0 = 0, + OUT_MAC1, + OUT_MAC2, + OUT_MAC3, + OUT_MAC4, + OUT_MAC5, + OUT_ETHTYP, + OUT_VTAG0, + OUT_VTAG1, + OUT_ITAG0, + OUT_ITAG1, /* 10 */ + OUT_ITAG2, + OUT_ITAG3, + OUT_IP0, + OUT_IP1, + OUT_IP2, + OUT_IP3, + OUT_SIP0, + OUT_SIP1, + OUT_SIP2, + OUT_SIP3, /* 20 */ + OUT_SIP4, + OUT_SIP5, + OUT_SIP6, + OUT_SIP7, + OUT_DIP0, + OUT_DIP1, + OUT_DIP2, + OUT_DIP3, + OUT_DIP4, + OUT_DIP5, /* 30 */ + OUT_DIP6, + OUT_DIP7, + OUT_SESID, + OUT_PROT, + OUT_APP0, + OUT_APP1, + OUT_IGMP0, + OUT_IGMP1, + OUT_STAG0 = 61, + OUT_STAG1 = 62, + OUT_NONE = 63, +}; + +/* parser's microcode flag type */ +enum { + FLAG_ITAG = 0, + FLAG_VLAN, + FLAG_SNAP, + FLAG_PPPOE, + FLAG_IPV6, + FLAG_IPV6FL, + FLAG_IPV4, + FLAG_IGMP, + FLAG_TU, + FLAG_HOP, + FLAG_NN1, /* 10 */ + FLAG_NN2, + FLAG_END, + FLAG_NO, /* 13 */ + FLAG_SVLAN, /* 14 */ +}; + +#define PCE_MC_M(val, msk, ns, out, len, type, flags, ipv4_len) \ + { (val), (msk), ((ns) << 10 | (out) << 4 | (len) >> 1),\ + ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 } + +/* V22_2X (IPv6 issue fixed) */ +static const struct gswip_pce_microcode gsw1xx_pce_microcode[] = { + /* value mask ns fields L type flags ipv4_len */ + PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0), + PCE_MC_M(0x8100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x88A8, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x9100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x8100, 0xFFFF, 5, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x88A8, 0xFFFF, 6, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x9100, 0xFFFF, 4, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x8864, 0xFFFF, 20, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0800, 0xFFFF, 24, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x86DD, 0xFFFF, 25, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x8863, 0xFFFF, 19, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xF800, 13, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_ETHTYP, 1, INSTR, 
FLAG_NO, 0), + PCE_MC_M(0x0600, 0x0600, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 15, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0xAAAA, 0xFFFF, 17, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0300, 0xFF00, 45, OUT_NONE, 0, INSTR, FLAG_SNAP, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_DIP7, 3, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 21, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0), + PCE_MC_M(0x0021, 0xFFFF, 24, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0057, 0xFFFF, 25, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x4000, 0xF000, 27, OUT_IP0, 4, INSTR, FLAG_IPV4, 1), + PCE_MC_M(0x6000, 0xF000, 30, OUT_IP0, 3, INSTR, FLAG_IPV6, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 28, OUT_IP3, 2, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 29, OUT_SIP0, 4, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, LENACCU, FLAG_NO, 0), + PCE_MC_M(0x1100, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0600, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_HOP, 0), + PCE_MC_M(0x2B00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN1, 0), + PCE_MC_M(0x3C00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN2, 0), + PCE_MC_M(0x0000, 0x0000, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x00F0, 38, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_HOP, 0), + PCE_MC_M(0x2B00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN1, 0), + PCE_MC_M(0x3C00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN2, 0), + PCE_MC_M(0x0000, 0x00FC, 44, OUT_PROT, 0, IPV6, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, IPV6, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_SIP0, 16, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_APP0, 4, INSTR, FLAG_IGMP, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), +}; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 5df8f153d511..5facffbb9c9a 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -244,7 +244,7 @@ static 
int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg) p->phydev.link = 0; } } else if (reg == MII_BMSR) { - p->phydev.link = (val & BMSR_LSTATUS); + p->phydev.link = !!(val & BMSR_LSTATUS); } } diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c new file mode 100644 index 000000000000..944988e29127 --- /dev/null +++ b/drivers/net/dsa/yt921x.c @@ -0,0 +1,2891 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Motorcomm YT921x Switch + * + * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII; + * be sure to do your own checks before porting to another chip. + * + * Copyright (c) 2025 David Yang + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/if_hsr.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include <net/dsa.h> + +#include "yt921x.h" + +struct yt921x_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) \ + {_size, _offset, _name} + +/* Must agree with yt921x_mib + * + * Unstructured fields (name != NULL) will appear in get_ethtool_stats(), + * structured ones go to their *_stats() methods, but we need their sizes and + * offsets to handle 32bit MIB overflow wraparound. + */ +static const struct yt921x_mib_desc yt921x_mib_descs[] = { + MIB_DESC(1, 0x00, NULL), /* RxBroadcast */ + MIB_DESC(1, 0x04, NULL), /* RxPause */ + MIB_DESC(1, 0x08, NULL), /* RxMulticast */ + MIB_DESC(1, 0x0c, NULL), /* RxCrcErr */ + + MIB_DESC(1, 0x10, NULL), /* RxAlignErr */ + MIB_DESC(1, 0x14, NULL), /* RxUnderSizeErr */ + MIB_DESC(1, 0x18, NULL), /* RxFragErr */ + MIB_DESC(1, 0x1c, NULL), /* RxPktSz64 */ + + MIB_DESC(1, 0x20, NULL), /* RxPktSz65To127 */ + MIB_DESC(1, 0x24, NULL), /* RxPktSz128To255 */ + MIB_DESC(1, 0x28, NULL), /* RxPktSz256To511 */ + MIB_DESC(1, 0x2c, NULL), /* RxPktSz512To1023 */ + + MIB_DESC(1, 0x30, NULL), /* RxPktSz1024To1518 */ + MIB_DESC(1, 0x34, NULL), /* RxPktSz1519ToMax */ + MIB_DESC(2, 0x38, NULL), /* RxGoodBytes */ + /* 0x3c */ + + MIB_DESC(2, 0x40, "RxBadBytes"), + /* 0x44 */ + MIB_DESC(2, 0x48, NULL), /* RxOverSzErr */ + /* 0x4c */ + + MIB_DESC(1, 0x50, NULL), /* RxDropped */ + MIB_DESC(1, 0x54, NULL), /* TxBroadcast */ + MIB_DESC(1, 0x58, NULL), /* TxPause */ + MIB_DESC(1, 0x5c, NULL), /* TxMulticast */ + + MIB_DESC(1, 0x60, NULL), /* TxUnderSizeErr */ + MIB_DESC(1, 0x64, NULL), /* TxPktSz64 */ + MIB_DESC(1, 0x68, NULL), /* TxPktSz65To127 */ + MIB_DESC(1, 0x6c, NULL), /* TxPktSz128To255 */ + + MIB_DESC(1, 0x70, NULL), /* TxPktSz256To511 */ + MIB_DESC(1, 0x74, NULL), /* TxPktSz512To1023 */ + MIB_DESC(1, 0x78, NULL), /* TxPktSz1024To1518 */ + MIB_DESC(1, 0x7c, NULL), /* TxPktSz1519ToMax */ + + MIB_DESC(2, 0x80, NULL), /* TxGoodBytes */ + /* 0x84 */ + MIB_DESC(2, 0x88, NULL), /* TxCollision */ + /* 0x8c */ + + MIB_DESC(1, 0x90, NULL), /* TxExcessiveCollision */ + MIB_DESC(1, 0x94, NULL), /* TxMultipleCollision */ + MIB_DESC(1, 0x98, NULL), /* TxSingleCollision */ + MIB_DESC(1, 0x9c, NULL), /* TxPkt */ + + MIB_DESC(1, 0xa0, NULL), /* TxDeferred */ + MIB_DESC(1, 0xa4, NULL), /* TxLateCollision */ + MIB_DESC(1, 0xa8, "RxOAM"), + MIB_DESC(1, 0xac, "TxOAM"), +}; + +struct yt921x_info { + const char *name; + u16 major; + /* Unknown, seems to be plain enumeration */ + u8 mode; + u8 extmode; + /* Ports with integral GbE PHYs, not including MCU Port 10 */ + u16 internal_mask; + /* 
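Presumably the externally connectable (xMII / SerDes) ports. + * 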
TODO: see comments in yt921x_dsa_phylink_get_caps() */ + u16 external_mask; +}; + +#define YT921X_PORT_MASK_INTn(port) BIT(port) +#define YT921X_PORT_MASK_INT0_n(n) GENMASK((n) - 1, 0) +#define YT921X_PORT_MASK_EXT0 BIT(8) +#define YT921X_PORT_MASK_EXT1 BIT(9) + +static const struct yt921x_info yt921x_infos[] = { + { + "YT9215SC", YT9215_MAJOR, 1, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9215S", YT9215_MAJOR, 2, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9215RB", YT9215_MAJOR, 3, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9214NB", YT9215_MAJOR, 3, 2, + YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9213NB", YT9215_MAJOR, 3, 3, + YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3), + YT921X_PORT_MASK_EXT1, + }, + { + "YT9218N", YT9218_MAJOR, 0, 0, + YT921X_PORT_MASK_INT0_n(8), + 0, + }, + { + "YT9218MB", YT9218_MAJOR, 1, 0, + YT921X_PORT_MASK_INT0_n(8), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + {} +}; + +#define YT921X_NAME "yt921x" + +#define YT921X_VID_UNWARE 4095 + +#define YT921X_POLL_SLEEP_US 10000 +#define YT921X_POLL_TIMEOUT_US 100000 + +/* The interval should be small enough to avoid overflow of 32bit MIBs. + * + * Until we can read MIBs directly from the stats64 call (i.e. sleep + * there), we have to poll stats more frequently than is actually needed. + * For overflow protection alone, a 100 sec interval would normally be + * enough. + */ +#define YT921X_STATS_INTERVAL_JIFFIES (3 * HZ) + +struct yt921x_reg_mdio { + struct mii_bus *bus; + int addr; + /* SWITCH_ID_1 / SWITCH_ID_0 of the device + * + * This is a way to multiplex multiple devices on the same MII phyaddr + * and should be configurable in DT. However, the MDIO core simply + * doesn't allow multiple devices on one reg addr, so this is a fixed + * value for now until a solution is found. + * + * Keep this because we need switchid to form MII regaddrs anyway.
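To put numbers on the interval comment above: every byte counter in the table is 64-bit (size 2), so the fastest-wrapping 32-bit MIBs are the per-size packet counters. Gigabit line rate tops out around 1.49 Mpps (64-byte frames), so a 32-bit packet counter needs 2^32 / 1.49e6, roughly 2900 s, to wrap, and still about 1150 s at 2.5 Gb/s; both comfortably exceed the 100 s mentioned in the comment, and the 3 s poll leaves a wide margin on top.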
+ */ + unsigned char switchid; +}; + +/* TODO: SPI/I2C */ + +#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds) +#define to_device(priv) ((priv)->ds.dev) + +static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp) +{ + WARN_ON(!mutex_is_locked(&priv->reg_lock)); + + return priv->reg_ops->read(priv->reg_ctx, reg, valp); +} + +static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val) +{ + WARN_ON(!mutex_is_locked(&priv->reg_lock)); + + return priv->reg_ops->write(priv->reg_ctx, reg, val); +} + +static int +yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp) +{ + u32 val; + int res; + int ret; + + ret = read_poll_timeout(yt921x_reg_read, res, + res || (val & mask) == *valp, + YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US, + false, priv, reg, &val); + if (ret) + return ret; + if (res) + return res; + + *valp = val; + return 0; +} + +static int +yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val) +{ + int res; + u32 v; + u32 u; + + res = yt921x_reg_read(priv, reg, &v); + if (res) + return res; + + u = v; + u &= ~mask; + u |= val; + if (u == v) + return 0; + + return yt921x_reg_write(priv, reg, u); +} + +static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask) +{ + return yt921x_reg_update_bits(priv, reg, 0, mask); +} + +static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask) +{ + return yt921x_reg_update_bits(priv, reg, mask, 0); +} + +static int +yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set) +{ + return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask); +} + +/* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if + * you are to write only the lower / upper 32 bits. + * + * There is no such restriction for reading, but we still provide 64-bit read + * wrappers so that we always handle u64 values. 
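For reference, the calling convention these helpers assume: reg_lock is taken once at each externally visible entry point and held across all register traffic, which is what the WARN_ON() checks. A hypothetical caller, not part of the driver, just to illustrate the pattern:

static int yt921x_example_disable_learning(struct yt921x_priv *priv, int port)
{
	int res;

	/* all yt921x_reg_* helpers expect reg_lock to be held */
	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port),
				  YT921X_PORT_LEARN_DIS);
	mutex_unlock(&priv->reg_lock);

	return res;
}

Note that yt921x_reg_update_bits() skips the write entirely when the masked bits already hold the requested value, so repeated calls are cheap.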
+ */ + +static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp) +{ + u32 lo; + u32 hi; + int res; + + res = yt921x_reg_read(priv, reg, &lo); + if (res) + return res; + res = yt921x_reg_read(priv, reg + 4, &hi); + if (res) + return res; + + *valp = ((u64)hi << 32) | lo; + return 0; +} + +static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val) +{ + int res; + + res = yt921x_reg_write(priv, reg, (u32)val); + if (res) + return res; + return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32)); +} + +static int +yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val) +{ + int res; + u64 v; + u64 u; + + res = yt921x_reg64_read(priv, reg, &v); + if (res) + return res; + + u = v; + u &= ~mask; + u |= val; + if (u == v) + return 0; + + return yt921x_reg64_write(priv, reg, u); +} + +static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask) +{ + return yt921x_reg64_update_bits(priv, reg, mask, 0); +} + +static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp) +{ + struct yt921x_reg_mdio *mdio = context; + struct mii_bus *bus = mdio->bus; + int addr = mdio->addr; + u32 reg_addr; + u32 reg_data; + u32 val; + int res; + + /* Hold the mdio bus lock to avoid (un)locking for 4 times */ + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR | + YT921X_SMI_READ; + res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_addr, (u16)reg); + if (res) + goto end; + + reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA | + YT921X_SMI_READ; + res = __mdiobus_read(bus, addr, reg_data); + if (res < 0) + goto end; + val = (u16)res; + res = __mdiobus_read(bus, addr, reg_data); + if (res < 0) + goto end; + val = (val << 16) | (u16)res; + + *valp = val; + res = 0; + +end: + mutex_unlock(&bus->mdio_lock); + return res; +} + +static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val) +{ + struct yt921x_reg_mdio *mdio = context; + struct mii_bus *bus = mdio->bus; + int addr = mdio->addr; + u32 reg_addr; + u32 reg_data; + int res; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR | + YT921X_SMI_WRITE; + res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_addr, (u16)reg); + if (res) + goto end; + + reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA | + YT921X_SMI_WRITE; + res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_data, (u16)val); + if (res) + goto end; + + res = 0; + +end: + mutex_unlock(&bus->mdio_lock); + return res; +} + +static const struct yt921x_reg_ops yt921x_reg_ops_mdio = { + .read = yt921x_reg_mdio_read, + .write = yt921x_reg_mdio_write, +}; + +/* TODO: SPI/I2C */ + +static int yt921x_intif_wait(struct yt921x_priv *priv) +{ + u32 val = 0; + + return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START, + &val); +} + +static int +yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + u32 val; + int res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + 
YT921X_MBUS_CTRL_READ; + res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val); + if (res) + return res; + + if ((u16)val != val) + dev_info(dev, + "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n", + __func__, port, reg, val); + *valp = (u16)val; + return 0; +} + +static int +yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val) +{ + u32 mask; + u32 ctrl; + int res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_WRITE; + res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + return yt921x_intif_wait(priv); +} + +static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg) +{ + struct yt921x_priv *priv = mbus->priv; + u16 val; + int res; + + if (port >= YT921X_PORT_NUM) + return U16_MAX; + + mutex_lock(&priv->reg_lock); + res = yt921x_intif_read(priv, port, reg, &val); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + return val; +} + +static int +yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data) +{ + struct yt921x_priv *priv = mbus->priv; + int res; + + if (port >= YT921X_PORT_NUM) + return -ENODEV; + + mutex_lock(&priv->reg_lock); + res = yt921x_intif_write(priv, port, reg, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp) +{ + struct device *dev = to_device(priv); + struct mii_bus *mbus; + int res; + + mbus = devm_mdiobus_alloc(dev); + if (!mbus) + return -ENOMEM; + + mbus->name = "YT921x internal MDIO bus"; + snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + mbus->priv = priv; + mbus->read = yt921x_mbus_int_read; + mbus->write = yt921x_mbus_int_write; + mbus->parent = dev; + mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0); + + res = devm_of_mdiobus_register(dev, mbus, mnp); + if (res) + return res; + + priv->mbus_int = mbus; + + return 0; +} + +static int yt921x_extif_wait(struct yt921x_priv *priv) +{ + u32 val = 0; + + return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START, + &val); +} + +static int +yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + u32 val; + int res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ; + res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val); + if (res) + return res; + + if ((u16)val != val) + dev_info(dev, + "%s: port %d, 
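A cost note implied by yt921x_reg_mdio_read() and yt921x_reg_mdio_write() above: one 32-bit switch-register access expands to four 16-bit MDIO transactions. Schematically (register names abbreviated; a sketch of the sequence, not literal code):

	/* read of one 32-bit register over SMI */
	__mdiobus_write(bus, addr, REG_ADDR, reg >> 16);	/* addr hi */
	__mdiobus_write(bus, addr, REG_ADDR, reg & 0xffff);	/* addr lo */
	hi = __mdiobus_read(bus, addr, REG_DATA);		/* data hi */
	lo = __mdiobus_read(bus, addr, REG_DATA);		/* data lo */

This is why the code holds mdio_lock across the whole sequence instead of locking four times, and why every register access may sleep, pushing MIB collection into a workqueue.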
reg 0x%x: Expected u16, got 0x%08x\n", + __func__, port, reg, val); + *valp = (u16)val; + return 0; +} + +static int +yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val) +{ + u32 mask; + u32 ctrl; + int res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE; + res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + return yt921x_extif_wait(priv); +} + +static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg) +{ + struct yt921x_priv *priv = mbus->priv; + u16 val; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_extif_read(priv, port, reg, &val); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + return val; +} + +static int +yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data) +{ + struct yt921x_priv *priv = mbus->priv; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_extif_write(priv, port, reg, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp) +{ + struct device *dev = to_device(priv); + struct mii_bus *mbus; + int res; + + mbus = devm_mdiobus_alloc(dev); + if (!mbus) + return -ENOMEM; + + mbus->name = "YT921x external MDIO bus"; + snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev)); + mbus->priv = priv; + /* TODO: c45? */ + mbus->read = yt921x_mbus_ext_read; + mbus->write = yt921x_mbus_ext_write; + mbus->parent = dev; + + res = devm_of_mdiobus_register(dev, mbus, mnp); + if (res) + return res; + + priv->mbus_ext = mbus; + + return 0; +} + +/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */ +static int yt921x_read_mib(struct yt921x_priv *priv, int port) +{ + struct yt921x_port *pp = &priv->ports[port]; + struct device *dev = to_device(priv); + struct yt921x_mib *mib = &pp->mib; + int res = 0; + + /* Reading of yt921x_port::mib is not protected by a lock and it's vain + * to keep its consistency, since we have to read registers one by one + * and there is no way to make a snapshot of MIB stats. + * + * Writing (by this function only) is and should be protected by + * reg_lock. 
+ */ + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + u32 reg = YT921X_MIBn_DATA0(port) + desc->offset; + u64 *valp = &((u64 *)mib)[i]; + u64 val = *valp; + u32 val0; + u32 val1; + + res = yt921x_reg_read(priv, reg, &val0); + if (res) + break; + + if (desc->size <= 1) { + if (val0 < (u32)val) + /* overflow: e.g. cached low word 0xfffffff0 + * read back as 0x00000010 carries one into + * bit 32 + */ + val += (u64)U32_MAX + 1; + val &= ~(u64)U32_MAX; + val |= val0; + } else { + res = yt921x_reg_read(priv, reg + 4, &val1); + if (res) + break; + val = ((u64)val0 << 32) | val1; + } + + WRITE_ONCE(*valp, val); + } + + pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte + + mib->rx_128_255byte + mib->rx_256_511byte + + mib->rx_512_1023byte + mib->rx_1024_1518byte + + mib->rx_jumbo; + pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte + + mib->tx_128_255byte + mib->tx_256_511byte + + mib->tx_512_1023byte + mib->tx_1024_1518byte + + mib->tx_jumbo; + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "read stats for", + port, res); + return res; +} + +static void yt921x_poll_mib(struct work_struct *work) +{ + struct yt921x_port *pp = container_of_const(work, struct yt921x_port, + mib_read.work); + struct yt921x_priv *priv = (void *)(pp - pp->index) - + offsetof(struct yt921x_priv, ports); + unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES; + int port = pp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + if (res) + delay *= 4; + + schedule_delayed_work(&pp->mib_read, delay); +} + +static void +yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + if (stringset != ETH_SS_STATS) + return; + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (desc->name) + ethtool_puts(&data, desc->name); + } +} + +static void +yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + size_t j; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + j = 0; + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (!desc->name) + continue; + + data[j] = ((u64 *)mib)[i]; + j++; + } +} + +static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + int cnt = 0; + + if (sset != ETH_SS_STATS) + return 0; + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (desc->name) + cnt++; + } + + return cnt; +} + +static void +yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + mac_stats->FramesTransmittedOK = pp->tx_frames; + mac_stats->SingleCollisionFrames = mib->tx_single_collisions; + mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions; + mac_stats->FramesReceivedOK = pp->rx_frames; + mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors; + mac_stats->AlignmentErrors = mib->rx_alignment_errors; + mac_stats->OctetsTransmittedOK = mib->tx_good_bytes; +
mac_stats->FramesWithDeferredXmissions = mib->tx_deferred; + mac_stats->LateCollisions = mib->tx_late_collisions; + mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors; + /* mac_stats->FramesLostDueToIntMACXmitError */ + /* mac_stats->CarrierSenseErrors */ + mac_stats->OctetsReceivedOK = mib->rx_good_bytes; + /* mac_stats->FramesLostDueToIntMACRcvError */ + mac_stats->MulticastFramesXmittedOK = mib->tx_multicast; + mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast; + /* mac_stats->FramesWithExcessiveDeferral */ + mac_stats->MulticastFramesReceivedOK = mib->rx_multicast; + mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast; + /* mac_stats->InRangeLengthErrors */ + /* mac_stats->OutOfRangeLengthField */ + mac_stats->FrameTooLongErrors = mib->rx_oversize_errors; +} + +static void +yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + ctrl_stats->MACControlFramesTransmitted = mib->tx_pause; + ctrl_stats->MACControlFramesReceived = mib->rx_pause; + /* ctrl_stats->UnsupportedOpcodesReceived */ +} + +static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, YT921X_FRAME_SIZE_MAX }, + {} +}; + +static void +yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + *ranges = yt921x_rmon_ranges; + + rmon_stats->undersize_pkts = mib->rx_undersize_errors; + rmon_stats->oversize_pkts = mib->rx_oversize_errors; + rmon_stats->fragments = mib->rx_alignment_errors; + /* rmon_stats->jabbers */ + + rmon_stats->hist[0] = mib->rx_64byte; + rmon_stats->hist[1] = mib->rx_65_127byte; + rmon_stats->hist[2] = mib->rx_128_255byte; + rmon_stats->hist[3] = mib->rx_256_511byte; + rmon_stats->hist[4] = mib->rx_512_1023byte; + rmon_stats->hist[5] = mib->rx_1024_1518byte; + rmon_stats->hist[6] = mib->rx_jumbo; + + rmon_stats->hist_tx[0] = mib->tx_64byte; + rmon_stats->hist_tx[1] = mib->tx_65_127byte; + rmon_stats->hist_tx[2] = mib->tx_128_255byte; + rmon_stats->hist_tx[3] = mib->tx_256_511byte; + rmon_stats->hist_tx[4] = mib->tx_512_1023byte; + rmon_stats->hist_tx[5] = mib->tx_1024_1518byte; + rmon_stats->hist_tx[6] = mib->tx_jumbo; +} + +static void +yt921x_dsa_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + stats->rx_length_errors = mib->rx_undersize_errors + + mib->rx_fragment_errors; + stats->rx_over_errors = mib->rx_oversize_errors; + stats->rx_crc_errors = mib->rx_crc_errors; + stats->rx_frame_errors = mib->rx_alignment_errors; + /* stats->rx_fifo_errors */ + /* stats->rx_missed_errors */ + + stats->tx_aborted_errors = mib->tx_aborted_errors; + /* stats->tx_carrier_errors */ + stats->tx_fifo_errors = mib->tx_undersize_errors; + /* stats->tx_heartbeat_errors */ + stats->tx_window_errors = 
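One asymmetry worth flagging among these callbacks: the ethtool handlers take reg_lock and refresh the counters from hardware on each call, while yt921x_dsa_get_stats64() must not sleep and therefore reads only the values last cached by the yt921x_poll_mib() work, relying on the WRITE_ONCE() in yt921x_read_mib() for tear-free 64-bit updates.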
mib->tx_late_collisions; + + stats->rx_packets = pp->rx_frames; + stats->tx_packets = pp->tx_frames; + stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets; + stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets; + stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors + + stats->rx_crc_errors + stats->rx_frame_errors; + stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors + + stats->tx_window_errors; + stats->rx_dropped = mib->rx_dropped; + /* stats->tx_dropped */ + stats->multicast = mib->rx_multicast; + stats->collisions = mib->tx_collisions; +} + +static void +yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + pause_stats->tx_pause_frames = mib->tx_pause; + pause_stats->rx_pause_frames = mib->rx_pause; +} + +static int +yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e) +{ + /* Poor datasheet for EEE operations; don't ask if you are confused */ + + bool enable = e->eee_enabled; + u16 new_mask; + int res; + + /* Enable / disable global EEE */ + new_mask = priv->eee_ports_mask; + new_mask &= ~BIT(port); + new_mask |= !enable ? 0 : BIT(port); + + if (!!new_mask != !!priv->eee_ports_mask) { + res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC, + YT921X_PON_STRAP_EEE, !!new_mask); + if (res) + return res; + res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL, + YT921X_PON_STRAP_EEE, !!new_mask); + if (res) + return res; + } + + priv->eee_ports_mask = new_mask; + + /* Enable / disable port EEE */ + res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL, + YT921X_EEE_CTRL_ENn(port), enable); + if (res) + return res; + res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port), + YT921X_EEE_VAL_DATA, enable); + if (res) + return res; + + return 0; +} + +static int +yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_set_eee(priv, port, e); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + /* Only serves as packet filter, since the frame size is always set to + * maximum after reset + */ + + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct dsa_port *dp = dsa_to_port(ds, port); + int frame_size; + int res; + + frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (dsa_port_is_cpu(dp)) + frame_size += YT921X_TAG_LEN; + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port), + YT921X_MAC_FRAME_SIZE_M, + YT921X_MAC_FRAME_SIZE(frame_size)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port) +{ + /* Only called for user ports, exclude tag len here */ + return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN; +} + +static int +yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress) +{ + u32 mask; + + if (ingress) + mask = YT921X_MIRROR_IGR_PORTn(port); + else + mask = YT921X_MIRROR_EGR_PORTn(port); + return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask); +} + +static int +yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress, + int to_local_port, 
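The MTU plumbing works out as follows: yt921x_dsa_port_change_mtu() programs frame_size = MTU + ETH_HLEN + ETH_FCS_LEN, so the default MTU of 1500 yields 1518 bytes on a user port, and a CPU port reserves YT921X_TAG_LEN on top for the DSA tag. yt921x_dsa_port_max_mtu() inverts the same sum and subtracts the tag even for user ports, since their traffic must also fit through the tagged CPU port.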
struct netlink_ext_ack *extack) +{ + u32 srcs; + u32 ctrl; + u32 val; + u32 dst; + int res; + + if (ingress) + srcs = YT921X_MIRROR_IGR_PORTn(port); + else + srcs = YT921X_MIRROR_EGR_PORTn(port); + dst = YT921X_MIRROR_PORT(to_local_port); + + res = yt921x_reg_read(priv, YT921X_MIRROR, &val); + if (res) + return res; + + /* other mirror tasks & different dst port -> conflict */ + if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M | + YT921X_MIRROR_IGR_PORTS_M)) && + (val & YT921X_MIRROR_PORT_M) != dst) { + NL_SET_ERR_MSG_MOD(extack, + "Sniffer port is already configured, delete existing rules & retry"); + return -EBUSY; + } + + ctrl = val & ~YT921X_MIRROR_PORT_M; + ctrl |= srcs; + ctrl |= dst; + + if (ctrl == val) + return 0; + + return yt921x_reg_write(priv, YT921X_MIRROR, ctrl); +} + +static void +yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_mirror_del(priv, port, mirror->ingress); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "unmirror", + port, res); +} + +static int +yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_mirror_add(priv, port, ingress, + mirror->to_local_port, extack); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp) +{ + struct device *dev = to_device(priv); + u32 val = YT921X_FDB_RESULT_DONE; + int res; + + res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE, + &val); + if (res) { + dev_err(dev, "FDB probably stuck\n"); + return res; + } + + *valp = val; + return 0; +} + +static int +yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr, + u16 vid, u32 ctrl1) +{ + u32 ctrl; + int res; + + ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl); + if (res) + return res; + + ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5]; + return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); +} + +static int +yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 *indexp) +{ + u32 ctrl; + u32 val; + int res; + + res = yt921x_fdb_in01(priv, addr, vid, 0); + if (res) + return res; + + ctrl = 0; + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_wait(priv, &val); + if (res) + return res; + if (val & YT921X_FDB_RESULT_NOTFOUND) { + *indexp = YT921X_FDB_NUM; + return 0; + } + + *indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val); + return 0; +} + +static int +yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp, + u16 *ports_maskp, u16 *indexp, u8 *statusp) +{ + struct device *dev = to_device(priv); + u16 index; + u32 data0; + u32 data1; + u32 data2; + u32 val; + int res; + + res = yt921x_fdb_wait(priv, &val); + if (res) + return res; + if (val & YT921X_FDB_RESULT_NOTFOUND) { + *ports_maskp = 0; + return 0; + } + index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val); + + res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1); + if 
(res) + return res; + if ((data1 & YT921X_FDB_IO1_STATUS_M) == + YT921X_FDB_IO1_STATUS_INVALID) { + *ports_maskp = 0; + return 0; + } + + res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2); + if (res) + return res; + + addr[0] = data0 >> 24; + addr[1] = data0 >> 16; + addr[2] = data0 >> 8; + addr[3] = data0; + addr[4] = data1 >> 8; + addr[5] = data1; + *vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1); + *indexp = index; + *ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2); + *statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1); + + dev_dbg(dev, + "%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n", + __func__, *indexp, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], *vidp, *ports_maskp, *statusp); + return 0; +} + +static int +yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask, + dsa_fdb_dump_cb_t *cb, void *data) +{ + unsigned char addr[ETH_ALEN]; + u8 status; + u16 pmask; + u16 index; + u32 ctrl; + u16 vid; + int res; + + ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status); + if (res) + return res; + if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) { + res = cb(addr, vid, + status == YT921X_FDB_ENTRY_STATUS_STATIC, data); + if (res) + return res; + } + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + index = 0; + do { + ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT | + YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, + &status); + if (res) + return res; + if (!pmask) + break; + + if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) { + res = cb(addr, vid, + status == YT921X_FDB_ENTRY_STATUS_STATIC, + data); + if (res) + return res; + } + + /* Never call GET_NEXT with 4095, otherwise it will hang + * forever until a reset! 
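The dump sequence above is therefore: fetch entry 0 explicitly with GET_ONE, then walk the table with GET_NEXT filtered by the egress-port mask written to FDB_IN2, stopping either when a lookup comes back empty or when the returned index reaches YT921X_FDB_NUM - 1, so that GET_NEXT is never issued with index 4095.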
+ */ + } while (index < YT921X_FDB_NUM - 1); + + return 0; +} + +static int +yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid, + bool flush_static) +{ + u32 ctrl; + u32 val; + int res; + + if (vid < 4096) { + ctrl = YT921X_FDB_IO1_FID(vid); + res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); + if (res) + return res; + } + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START; + if (vid >= 4096) + ctrl |= YT921X_FDB_OP_FLUSH_PORT; + else + ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID; + if (flush_static) + ctrl |= YT921X_FDB_OP_FLUSH_STATIC; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_wait(priv, &val); + if (res) + return res; + + return 0; +} + +static int +yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static) +{ + return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static); +} + +static int +yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1, + u16 ctrl2) +{ + u32 ctrl; + u32 val; + int res; + + res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2); + if (res) + return res; + + ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + return yt921x_fdb_wait(priv, &val); +} + +static int +yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 ports_mask) +{ + u32 ctrl; + u32 val; + int res; + + ctrl = YT921X_FDB_IO1_STATUS_STATIC; + res = yt921x_fdb_in01(priv, addr, vid, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + return yt921x_fdb_wait(priv, &val); +} + +static int +yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr, + u16 vid, u16 ports_mask) +{ + u16 index; + u32 ctrl1; + u32 ctrl2; + u32 ctrl; + u32 val2; + u32 val; + int res; + + /* Check for presence */ + res = yt921x_fdb_has(priv, addr, vid, &index); + if (res) + return res; + if (index >= YT921X_FDB_NUM) + return 0; + + /* Check if action required */ + res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); + if (res) + return res; + + ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask); + if (ctrl2 == val2) + return 0; + if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) { + ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + return yt921x_fdb_wait(priv, &val); + } + + res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1); + if (res) + return res; + + return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); +} + +static int +yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 ports_mask) +{ + u16 index; + u32 ctrl1; + u32 ctrl2; + u32 val1; + u32 val2; + int res; + + /* Check for presence */ + res = yt921x_fdb_has(priv, addr, vid, &index); + if (res) + return res; + if (index >= YT921X_FDB_NUM) + return yt921x_fdb_add(priv, addr, vid, ports_mask); + + /* Check if action required */ + res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1); + if (res) + return res; + res = 
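Taken together, yt921x_fdb_join() and yt921x_fdb_leave() give one shared hardware entry per {addr, vid} with per-port membership semantics. A hypothetical call sequence against an initially absent entry (reg_lock held by the caller, as in the DSA callbacks; error handling omitted):

	/* not present yet: creates a static entry for port 1 */
	yt921x_fdb_join(priv, addr, vid, BIT(1));
	/* same entry, EGR_PORTS grows to BIT(1) | BIT(2) */
	yt921x_fdb_join(priv, addr, vid, BIT(2));
	/* mask shrinks back to BIT(2) */
	yt921x_fdb_leave(priv, addr, vid, BIT(1));
	/* last member gone: the entry itself is deleted */
	yt921x_fdb_leave(priv, addr, vid, BIT(2));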
yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); + if (res) + return res; + + ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M; + ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC; + ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask); + if (ctrl1 == val1 && ctrl2 == val2) + return 0; + + return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); +} + +static int +yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" + * only wants to see unicast + */ + res = yt921x_fdb_dump(priv, BIT(port), cb, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_flush_port(priv, port, false); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for", + port, res); +} + +static int +yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u32 ctrl; + int res; + + /* AGEING reg is set in 5s step */ + ctrl = clamp(msecs / 5000, 1, U16_MAX); + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_write(priv, YT921X_AGEING, ctrl); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_join(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const unsigned char *addr = mdb->addr; + u16 vid = mdb->vid; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const unsigned char *addr = mdb->addr; + u16 vid = mdb->vid; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_join(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid) +{ + u32 mask; + u32 ctrl; + + mask = YT921X_PORT_VLAN_CTRL_CVID_M; + ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid); + return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port), + mask, ctrl); +} + +static int +yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + struct net_device *bdev; + u16 pvid; + u32 mask; + u32 ctrl; + int res; + + bdev = dsa_port_bridge_dev_get(dp); + + if (!bdev || !vlan_filtering) + pvid = 
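On the ageing conversion above: since the AGEING register counts in 5 s units, clamp(msecs / 5000, 1, U16_MAX) maps the bridge default of 300 000 ms to 60 units (exactly 300 s again), and the lower clamp keeps sub-5 s requests from programming 0.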
YT921X_VID_UNWARE; + else + br_vlan_get_pvid(bdev, &pvid); + res = yt921x_port_set_pvid(priv, port, pvid); + if (res) + return res; + + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED | + YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + ctrl = 0; + /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */ + if (vlan_filtering && !pvid) + ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), + mask, ctrl); + if (res) + return res; + + res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER, + YT921X_VLAN_IGR_FILTER_PORTn(port), + vlan_filtering); + if (res) + return res; + + /* Turn on / off VLAN awareness */ + mask = YT921X_PORT_IGR_TPIDn_CTAG_M; + if (!vlan_filtering) + ctrl = 0; + else + ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0); + res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port), + mask, ctrl); + if (res) + return res; + + return 0; +} + +static int +yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid) +{ + u64 mask64; + + mask64 = YT921X_VLAN_CTRL_PORTS(port) | + YT921X_VLAN_CTRL_UNTAG_PORTn(port); + + return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64); +} + +static int +yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged) +{ + u64 mask64; + u64 ctrl64; + + mask64 = YT921X_VLAN_CTRL_PORTn(port) | + YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask); + ctrl64 = mask64; + + mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); + if (untagged) + ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); + + return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid), + mask64, ctrl64); +} + +static int +yt921x_pvid_clear(struct yt921x_priv *priv, int port) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + bool vlan_filtering; + u32 mask; + int res; + + vlan_filtering = dsa_port_is_vlan_filtering(dp); + + res = yt921x_port_set_pvid(priv, port, + vlan_filtering ? 
0 : YT921X_VID_UNWARE); + if (res) + return res; + + if (vlan_filtering) { + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), + mask); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + bool vlan_filtering; + u32 mask; + int res; + + vlan_filtering = dsa_port_is_vlan_filtering(dp); + + if (vlan_filtering) { + res = yt921x_port_set_pvid(priv, port, vid); + if (res) + return res; + } + + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask); + if (res) + return res; + + return 0; +} + +static int +yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + res = yt921x_vlan_filtering(priv, port, vlan_filtering); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u16 vid = vlan->vid; + u16 pvid; + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + do { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + res = yt921x_vlan_del(priv, port, vid); + if (res) + break; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + br_vlan_get_pvid(bdev, &pvid); + if (pvid == vid) + res = yt921x_pvid_clear(priv, port); + } + } while (0); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u16 vid = vlan->vid; + u16 pvid; + int res; + + /* CPU port is supposed to be a member of every VLAN; see + * yt921x_vlan_add() and yt921x_port_setup() + */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + do { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + res = yt921x_vlan_add(priv, port, vid, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (res) + break; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + res = yt921x_pvid_set(priv, port, vid); + } else { + br_vlan_get_pvid(bdev, &pvid); + if (pvid == vid) + res = yt921x_pvid_clear(priv, port); + } + } + } while (0); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_userport_standalone(struct yt921x_priv *priv, int port) +{ + u32 mask; + u32 ctrl; + int res; + + ctrl = ~priv->cpu_ports_mask; + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl); + if (res) + return res; + + /* Turn off FDB learning to prevent FDB pollution */ + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask); + if (res) + return res; + + /* Turn off VLAN awareness */ + mask = YT921X_PORT_IGR_TPIDn_CTAG_M; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask); + if (res) + return res; + + /* Unrelated since learning is off and all packets are trapped; + * set it anyway + */ + res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE); + if (res) + return res; + + return 0; +} + +static int 
yt921x_userport_bridge(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask); + if (res) + return res; + + return 0; +} + +static int yt921x_isolate(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = BIT(port); + for (int i = 0; i < YT921X_PORT_NUM; i++) { + if ((BIT(i) & priv->cpu_ports_mask) || i == port) + continue; + + res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i), + mask); + if (res) + return res; + } + + return 0; +} + +/* Make sure to include the CPU port in ports_mask, or your bridge will + * not have it. + */ +static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask) +{ + unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask; + u32 isolated_mask; + u32 ctrl; + int port; + int res; + + isolated_mask = 0; + for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { + struct yt921x_port *pp = &priv->ports[port]; + + if (pp->isolated) + isolated_mask |= BIT(port); + } + + /* Block from non-cpu bridge ports ... */ + for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { + struct yt921x_port *pp = &priv->ports[port]; + + /* to non-bridge ports */ + ctrl = ~ports_mask; + /* to isolated ports when isolated */ + if (pp->isolated) + ctrl |= isolated_mask; + /* to itself when non-hairpin */ + if (!pp->hairpin) + ctrl |= BIT(port); + else + ctrl &= ~BIT(port); + + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), + ctrl); + if (res) + return res; + } + + return 0; +} + +static int yt921x_bridge_leave(struct yt921x_priv *priv, int port) +{ + int res; + + res = yt921x_userport_standalone(priv, port); + if (res) + return res; + + res = yt921x_isolate(priv, port); + if (res) + return res; + + return 0; +} + +static int +yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask) +{ + int res; + + res = yt921x_userport_bridge(priv, port); + if (res) + return res; + + res = yt921x_bridge(priv, ports_mask); + if (res) + return res; + + return 0; +} + +static u32 +dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev) +{ + struct dsa_port *dp; + u32 mask = 0; + + dsa_switch_for_each_user_port(dp, ds) + if (dsa_port_offloads_bridge_dev(dp, bdev)) + mask |= BIT(dp->index); + + return mask; +} + +static int +yt921x_bridge_flags(struct yt921x_priv *priv, int port, + struct switchdev_brport_flags flags) +{ + struct yt921x_port *pp = &priv->ports[port]; + bool do_flush; + u32 mask; + int res; + + if (flags.mask & BR_LEARNING) { + bool learning = flags.val & BR_LEARNING; + + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port), + mask, !learning); + if (res) + return res; + } + + /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP + * is set + */ + + /* BR_BCAST_FLOOD: we can filter bcast, but cannot trap them */ + + do_flush = false; + if (flags.mask & BR_HAIRPIN_MODE) { + pp->hairpin = flags.val & BR_HAIRPIN_MODE; + do_flush = true; + } + if (flags.mask & BR_ISOLATED) { + pp->isolated = flags.val & BR_ISOLATED; + do_flush = true; + } + if (do_flush) { + struct dsa_switch *ds = &priv->ds; + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + u32 ports_mask; + + ports_mask = dsa_bridge_ports(ds, bdev); + ports_mask |= priv->cpu_ports_mask; + res = yt921x_bridge(priv, ports_mask); + if (res) + return res; + } + } + + return 0; +} + +static int +yt921x_dsa_port_pre_bridge_flags(struct 
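To make the isolation matrix in yt921x_bridge() concrete, a worked example with hypothetical port numbers: ports 1, 2 and 3 bridged, CPU port 10, BR_ISOLATED set on ports 2 and 3, no hairpin:

	/*
	 * ports_mask    = BIT(1) | BIT(2) | BIT(3) | BIT(10)
	 * isolated_mask = BIT(2) | BIT(3)
	 *
	 * PORT1_ISOLATION = ~ports_mask | BIT(1)         -> egress to 2, 3, CPU
	 * PORT2_ISOLATION = ~ports_mask | isolated_mask  -> egress to 1, CPU
	 * PORT3_ISOLATION = ~ports_mask | isolated_mask  -> egress to 1, CPU
	 */

A set bit blocks egress towards that port, so isolated ports lose only their paths to each other, matching BR_ISOLATED semantics.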
dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD | + BR_MCAST_FLOOD | BR_ISOLATED)) + return -EINVAL; + return 0; +} + +static int +yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_flags(priv, port, flags); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void +yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + if (dsa_is_cpu_port(ds, port)) + return; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_leave(priv, port); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "unbridge", + port, res); +} + +static int +yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u16 ports_mask; + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + ports_mask = dsa_bridge_ports(ds, bridge.dev); + ports_mask |= priv->cpu_ports_mask; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_join(priv, port, ports_mask); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_port_down(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask); + if (res) + return res; + + if (yt921x_port_is_external(port)) { + mask = YT921X_SERDES_LINK; + res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask); + if (res) + return res; + + mask = YT921X_XMII_LINK; + res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + u32 mask; + u32 ctrl; + int res; + + switch (speed) { + case SPEED_10: + ctrl = YT921X_PORT_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_PORT_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_PORT_SPEED_1000; + break; + case SPEED_2500: + ctrl = YT921X_PORT_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_PORT_SPEED_10000; + break; + default: + return -EINVAL; + } + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_PORT_DUPLEX_FULL; + if (tx_pause) + ctrl |= YT921X_PORT_TX_PAUSE; + if (rx_pause) + ctrl |= YT921X_PORT_RX_PAUSE; + ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN; + res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl); + if (res) + return res; + + if (yt921x_port_is_external(port)) { + mask = YT921X_SERDES_SPEED_M; + switch (speed) { + case SPEED_10: + ctrl = YT921X_SERDES_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_SERDES_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_SERDES_SPEED_1000; + break; + case SPEED_2500: + ctrl = YT921X_SERDES_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_SERDES_SPEED_10000; + break; + default: + return -EINVAL; + } + mask |= YT921X_SERDES_DUPLEX_FULL; + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_SERDES_DUPLEX_FULL; 
+ mask |= YT921X_SERDES_TX_PAUSE; + if (tx_pause) + ctrl |= YT921X_SERDES_TX_PAUSE; + mask |= YT921X_SERDES_RX_PAUSE; + if (rx_pause) + ctrl |= YT921X_SERDES_RX_PAUSE; + mask |= YT921X_SERDES_LINK; + ctrl |= YT921X_SERDES_LINK; + res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port), + mask, ctrl); + if (res) + return res; + + mask = YT921X_XMII_LINK; + res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask); + if (res) + return res; + + switch (speed) { + case SPEED_10: + ctrl = YT921X_MDIO_POLLING_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_MDIO_POLLING_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_MDIO_POLLING_SPEED_1000; + break; + case SPEED_2500: + ctrl = YT921X_MDIO_POLLING_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_MDIO_POLLING_SPEED_10000; + break; + default: + return -EINVAL; + } + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL; + ctrl |= YT921X_MDIO_POLLING_LINK; + res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode, + phy_interface_t interface) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + int res; + + if (!yt921x_port_is_external(port)) { + if (interface != PHY_INTERFACE_MODE_INTERNAL) { + dev_err(dev, "Wrong mode %d on port %d\n", + interface, port); + return -EINVAL; + } + return 0; + } + + switch (interface) { + /* SERDES */ + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_100BASEX: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + mask = YT921X_SERDES_CTRL_PORTn(port); + res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask); + if (res) + return res; + + mask = YT921X_XMII_CTRL_PORTn(port); + res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask); + if (res) + return res; + + mask = YT921X_SERDES_MODE_M; + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + ctrl = YT921X_SERDES_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_100BASEX: + ctrl = YT921X_SERDES_MODE_100BASEX; + break; + case PHY_INTERFACE_MODE_1000BASEX: + ctrl = YT921X_SERDES_MODE_1000BASEX; + break; + case PHY_INTERFACE_MODE_2500BASEX: + ctrl = YT921X_SERDES_MODE_2500BASEX; + break; + default: + return -EINVAL; + } + res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port), + mask, ctrl); + if (res) + return res; + + break; + /* add XMII support here */ + default: + return -EINVAL; + } + + return 0; +} + +static void +yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + /* No need to sync; the port control block is held until device removal */ + cancel_delayed_work(&priv->ports[port].mib_read); + + mutex_lock(&priv->reg_lock); + res = yt921x_port_down(priv, port); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down", + port, res); +} + +static void +yt921x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_up(priv, port, mode, interface, speed, duplex, + tx_pause,
rx_pause); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up", + port, res); + + schedule_delayed_work(&priv->ports[port].mib_read, 0); +} + +static void +yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_config(priv, port, mode, state->interface); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config", + port, res); +} + +static void +yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const struct yt921x_info *info = priv->info; + + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + + if (info->internal_mask & BIT(port)) { + /* Port 10 for MCU should probably go here too. But since that + * is still untested, turn it down for the moment by letting it + * fall to the default branch. + */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + } else if (info->external_mask & BIT(port)) { + /* TODO: external ports may support SERDES only, XMII only, or + * SERDES + XMII depending on the chip. However, we can't get + * an accurate config table due to lack of documentation, thus + * we simply declare SERDES + XMII and rely on the correctness + * of the devicetree for now. + */ + + /* SERDES */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + /* REVSGMII (SGMII in PHY role) should go here, once + * PHY_INTERFACE_MODE_REVSGMII is introduced. + */ + __set_bit(PHY_INTERFACE_MODE_100BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + config->mac_capabilities |= MAC_2500FD; + + /* XMII */ + + /* Not tested. To add support for XMII: + * - Add proper interface modes below + * - Handle them in yt921x_port_config() + */ + } + /* no such port: empty supported_interfaces causes phylink to turn it + * down + */ +} + +static int yt921x_port_setup(struct yt921x_priv *priv, int port) +{ + struct dsa_switch *ds = &priv->ds; + u32 ctrl; + int res; + + res = yt921x_userport_standalone(priv, port); + if (res) + return res; + + if (dsa_is_cpu_port(ds, port)) { + /* Egress of the CPU port is supposed to be completely + * controlled via tagging, so set it to oneway isolated (drop + * all packets without a tag). + */ + ctrl = ~(u32)0; + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), + ctrl); + if (res) + return res; + + /* To simplify FDB "isolation" simulation, we also disable + * learning on the CPU port, and let software identify packets + * toward the CPU (either trapped or matching a static FDB + * entry, no matter which bridge that entry is for), which is + * already done by yt921x_userport_standalone(). As a result, + * VLAN awareness becomes irrelevant on the CPU port (which is + * left VLAN-unaware anyway).
+ */ + } + + return 0; +} + +static enum dsa_tag_protocol +yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol m) +{ + return DSA_TAG_PROTO_YT921X; +} + +static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_setup(priv, port); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp) +{ + u32 val = YT921X_EDATA_DATA_IDLE; + int res; + + res = yt921x_reg_wait(priv, YT921X_EDATA_DATA, + YT921X_EDATA_DATA_STATUS_M, &val); + if (res) + return res; + + *valp = val; + return 0; +} + +static int +yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp) +{ + u32 ctrl; + u32 val; + int res; + + ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ; + res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl); + if (res) + return res; + res = yt921x_edata_wait(priv, &val); + if (res) + return res; + + *valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val); + return 0; +} + +static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp) +{ + u32 val; + int res; + + res = yt921x_edata_wait(priv, &val); + if (res) + return res; + return yt921x_edata_read_cont(priv, addr, valp); +} + +static int yt921x_chip_detect(struct yt921x_priv *priv) +{ + struct device *dev = to_device(priv); + const struct yt921x_info *info; + u8 extmode; + u32 chipid; + u32 major; + u32 mode; + int res; + + res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid); + if (res) + return res; + + major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid); + + for (info = yt921x_infos; info->name; info++) + if (info->major == major) + break; + if (!info->name) { + dev_err(dev, "Unexpected chipid 0x%x\n", chipid); + return -ENODEV; + } + + res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode); + if (res) + return res; + res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode); + if (res) + return res; + + for (; info->name; info++) + if (info->major == major && info->mode == mode && + info->extmode == extmode) + break; + if (!info->name) { + dev_err(dev, + "Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n", + chipid, mode, extmode); + return -ENODEV; + } + + /* Print chipid here since we are interested in lower 16 bits */ + dev_info(dev, + "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n", + info->name, chipid, mode, extmode); + + priv->info = info; + return 0; +} + +static int yt921x_chip_reset(struct yt921x_priv *priv) +{ + struct device *dev = to_device(priv); + u16 eth_p_tag; + u32 val; + int res; + + res = yt921x_chip_detect(priv); + if (res) + return res; + + /* Reset */ + res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW); + if (res) + return res; + + /* RST_HW is almost same as GPIO hard reset, so we need this delay. */ + fsleep(YT921X_RST_DELAY_US); + + val = 0; + res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val); + if (res) + return res; + + /* Check for tag EtherType; do it after reset in case you messed it up + * before. + */ + res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val); + if (res) + return res; + eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val); + if (eth_p_tag != ETH_P_YT921X) { + dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag, + ETH_P_YT921X); + /* Despite being possible, we choose not to set CPU_TAG_TPID, + * since there is no way it can be different unless you have the + * wrong chip. 
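Note the two-stage match in yt921x_chip_detect() above: the first scan keys on the chip-ID major field alone, only to decide whether the silicon family is known at all; CHIP_MODE plus the extmode byte fetched over the EDATA interface then pins down the exact SKU. Per the yt921x_infos table, mode 3 with extmode 0, 2 and 3 distinguishes YT9215RB, YT9214NB and YT9213NB within the same YT9215 major.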
+		 */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int yt921x_chip_setup(struct yt921x_priv *priv)
+{
+	struct dsa_switch *ds = &priv->ds;
+	unsigned long cpu_ports_mask;
+	u64 ctrl64;
+	u32 ctrl;
+	int port;
+	int res;
+
+	/* Enable DSA */
+	priv->cpu_ports_mask = dsa_cpu_ports(ds);
+
+	ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
+	       YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
+	res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
+	if (res)
+		return res;
+
+	/* Enable and clear MIB */
+	res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
+	if (res)
+		return res;
+
+	ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
+	res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
+	if (res)
+		return res;
+
+	/* Setup software switch */
+	ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
+	res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
+	if (res)
+		return res;
+
+	ctrl = GENMASK(10, 0);
+	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
+	if (res)
+		return res;
+	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
+	if (res)
+		return res;
+
+	/* YT921x does not support native DSA port bridging, so we use port
+	 * isolation to emulate it. However, be especially careful that port
+	 * isolation takes effect _after_ FDB lookups, i.e. if an FDB entry
+	 * (from another bridge) is matched and the destination port (in
+	 * another bridge) is blocked, the packet will be dropped instead of
+	 * being flooded to the "bridged" ports, thus we need to trap and
+	 * handle those packets in software.
+	 *
+	 * If there is at most one bridge, we might be able to drop them
+	 * directly when certain conditions are met, but for now we trap them
+	 * in all cases.
+	 */
+	ctrl = 0;
+	for (int i = 0; i < YT921X_PORT_NUM; i++)
+		ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
+	/* The CPU ports are an exception: any packet sent via a CPU port
+	 * without a tag should be dropped.
+	 */
+	cpu_ports_mask = priv->cpu_ports_mask;
+	for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
+		ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
+		ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
+	}
+	res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
+	if (res)
+		return res;
+	res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
+	if (res)
+		return res;
+
+	/* Tagged VID 0 should be treated as untagged, which confuses the
+	 * hardware a lot
+	 */
+	ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
+	res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
+	if (res)
+		return res;
+
+	/* Miscellaneous */
+	res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
+	if (res)
+		return res;
+
+	return 0;
+}
+
+static int yt921x_dsa_setup(struct dsa_switch *ds)
+{
+	struct yt921x_priv *priv = to_yt921x_priv(ds);
+	struct device *dev = to_device(priv);
+	struct device_node *np = dev->of_node;
+	struct device_node *child;
+	int res;
+
+	mutex_lock(&priv->reg_lock);
+	res = yt921x_chip_reset(priv);
+	mutex_unlock(&priv->reg_lock);
+
+	if (res)
+		return res;
+
+	/* Register the internal mdio bus. Nodes for internal ports should
+	 * have a proper phy-handle pointing to their PHYs. Not enabling the
+	 * internal bus is possible, though pretty weird, if internal ports
+	 * are not used.
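+	 *
+	 * An illustrative (not normative) devicetree fragment:
+	 *
+	 *	mdio {
+	 *		phy0: ethernet-phy@0 {
+	 *			reg = <0>;
+	 *		};
+	 *	};
+	 *
+	 * with each internal port node carrying phy-handle = <&phy0> and so
+	 * on.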
+ */ + child = of_get_child_by_name(np, "mdio"); + if (child) { + res = yt921x_mbus_int_init(priv, child); + of_node_put(child); + if (res) + return res; + } + + /* External mdio bus is optional */ + child = of_get_child_by_name(np, "mdio-external"); + if (child) { + res = yt921x_mbus_ext_init(priv, child); + of_node_put(child); + if (res) + return res; + + dev_err(dev, "Untested external mdio bus\n"); + return -ENODEV; + } + + mutex_lock(&priv->reg_lock); + res = yt921x_chip_setup(priv); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + + return 0; +} + +static const struct phylink_mac_ops yt921x_phylink_mac_ops = { + .mac_link_down = yt921x_phylink_mac_link_down, + .mac_link_up = yt921x_phylink_mac_link_up, + .mac_config = yt921x_phylink_mac_config, +}; + +static const struct dsa_switch_ops yt921x_dsa_switch_ops = { + /* mib */ + .get_strings = yt921x_dsa_get_strings, + .get_ethtool_stats = yt921x_dsa_get_ethtool_stats, + .get_sset_count = yt921x_dsa_get_sset_count, + .get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats, + .get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats, + .get_rmon_stats = yt921x_dsa_get_rmon_stats, + .get_stats64 = yt921x_dsa_get_stats64, + .get_pause_stats = yt921x_dsa_get_pause_stats, + /* eee */ + .support_eee = dsa_supports_eee, + .set_mac_eee = yt921x_dsa_set_mac_eee, + /* mtu */ + .port_change_mtu = yt921x_dsa_port_change_mtu, + .port_max_mtu = yt921x_dsa_port_max_mtu, + /* mirror */ + .port_mirror_del = yt921x_dsa_port_mirror_del, + .port_mirror_add = yt921x_dsa_port_mirror_add, + /* fdb */ + .port_fdb_dump = yt921x_dsa_port_fdb_dump, + .port_fast_age = yt921x_dsa_port_fast_age, + .set_ageing_time = yt921x_dsa_set_ageing_time, + .port_fdb_del = yt921x_dsa_port_fdb_del, + .port_fdb_add = yt921x_dsa_port_fdb_add, + .port_mdb_del = yt921x_dsa_port_mdb_del, + .port_mdb_add = yt921x_dsa_port_mdb_add, + /* vlan */ + .port_vlan_filtering = yt921x_dsa_port_vlan_filtering, + .port_vlan_del = yt921x_dsa_port_vlan_del, + .port_vlan_add = yt921x_dsa_port_vlan_add, + /* bridge */ + .port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags, + .port_bridge_flags = yt921x_dsa_port_bridge_flags, + .port_bridge_leave = yt921x_dsa_port_bridge_leave, + .port_bridge_join = yt921x_dsa_port_bridge_join, + /* port */ + .get_tag_protocol = yt921x_dsa_get_tag_protocol, + .phylink_get_caps = yt921x_dsa_phylink_get_caps, + .port_setup = yt921x_dsa_port_setup, + /* chip */ + .setup = yt921x_dsa_setup, +}; + +static void yt921x_mdio_shutdown(struct mdio_device *mdiodev) +{ + struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev); + + if (!priv) + return; + + dsa_switch_shutdown(&priv->ds); +} + +static void yt921x_mdio_remove(struct mdio_device *mdiodev) +{ + struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev); + + if (!priv) + return; + + for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) { + struct yt921x_port *pp = &priv->ports[i]; + + disable_delayed_work_sync(&pp->mib_read); + } + + dsa_unregister_switch(&priv->ds); + + mutex_destroy(&priv->reg_lock); +} + +static int yt921x_mdio_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct yt921x_reg_mdio *mdio; + struct yt921x_priv *priv; + struct dsa_switch *ds; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL); + if (!mdio) + return -ENOMEM; + + mdio->bus = mdiodev->bus; + mdio->addr = mdiodev->addr; + mdio->switchid = 0; + + mutex_init(&priv->reg_lock); + + priv->reg_ops = &yt921x_reg_ops_mdio; + 
priv->reg_ctx = mdio; + + for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) { + struct yt921x_port *pp = &priv->ports[i]; + + pp->index = i; + INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib); + } + + ds = &priv->ds; + ds->dev = dev; + ds->assisted_learning_on_cpu_port = true; + ds->priv = priv; + ds->ops = &yt921x_dsa_switch_ops; + ds->phylink_mac_ops = &yt921x_phylink_mac_ops; + ds->num_ports = YT921X_PORT_NUM; + + mdiodev_set_drvdata(mdiodev, priv); + + return dsa_register_switch(ds); +} + +static const struct of_device_id yt921x_of_match[] = { + { .compatible = "motorcomm,yt9215" }, + {} +}; +MODULE_DEVICE_TABLE(of, yt921x_of_match); + +static struct mdio_driver yt921x_mdio_driver = { + .probe = yt921x_mdio_probe, + .remove = yt921x_mdio_remove, + .shutdown = yt921x_mdio_shutdown, + .mdiodrv.driver = { + .name = YT921X_NAME, + .of_match_table = yt921x_of_match, + }, +}; + +mdio_module_driver(yt921x_mdio_driver); + +MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>"); +MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h new file mode 100644 index 000000000000..3e85d90826fb --- /dev/null +++ b/drivers/net/dsa/yt921x.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 David Yang + */ + +#ifndef __YT921X_H +#define __YT921X_H + +#include <net/dsa.h> + +#define YT921X_SMI_SWITCHID_M GENMASK(3, 2) +#define YT921X_SMI_SWITCHID(x) FIELD_PREP(YT921X_SMI_SWITCHID_M, (x)) +#define YT921X_SMI_AD BIT(1) +#define YT921X_SMI_ADDR 0 +#define YT921X_SMI_DATA YT921X_SMI_AD +#define YT921X_SMI_RW BIT(0) +#define YT921X_SMI_WRITE 0 +#define YT921X_SMI_READ YT921X_SMI_RW + +#define YT921X_SWITCHID_NUM 4 + +#define YT921X_RST 0x80000 +#define YT921X_RST_HW BIT(31) +#define YT921X_RST_SW BIT(1) +#define YT921X_FUNC 0x80004 +#define YT921X_FUNC_MIB BIT(1) +#define YT921X_CHIP_ID 0x80008 +#define YT921X_CHIP_ID_MAJOR GENMASK(31, 16) +#define YT921X_EXT_CPU_PORT 0x8000c +#define YT921X_EXT_CPU_PORT_TAG_EN BIT(15) +#define YT921X_EXT_CPU_PORT_PORT_EN BIT(14) +#define YT921X_EXT_CPU_PORT_PORT_M GENMASK(3, 0) +#define YT921X_EXT_CPU_PORT_PORT(x) FIELD_PREP(YT921X_EXT_CPU_PORT_PORT_M, (x)) +#define YT921X_CPU_TAG_TPID 0x80010 +#define YT921X_CPU_TAG_TPID_TPID_M GENMASK(15, 0) +/* Same as ETH_P_YT921X, but this represents the true HW default, while the + * former is a local convention chosen by us. 
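+ * Both happen to be 0x9988; yt921x_chip_reset() relies on that when it
+ * sanity-checks YT921X_CPU_TAG_TPID after reset.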
+ */ +#define YT921X_CPU_TAG_TPID_TPID_DEFAULT 0x9988 +#define YT921X_PVID_SEL 0x80014 +#define YT921X_PVID_SEL_SVID_PORTn(port) BIT(port) +#define YT921X_SERDES_CTRL 0x80028 +#define YT921X_SERDES_CTRL_PORTn_TEST(port) BIT((port) - 3) +#define YT921X_SERDES_CTRL_PORTn(port) BIT((port) - 8) +#define YT921X_IO_LEVEL 0x80030 +#define YT9215_IO_LEVEL_NORMAL_M GENMASK(5, 4) +#define YT9215_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9215_IO_LEVEL_NORMAL_M, (x)) +#define YT9215_IO_LEVEL_NORMAL_3V3 YT9215_IO_LEVEL_NORMAL(0) +#define YT9215_IO_LEVEL_NORMAL_1V8 YT9215_IO_LEVEL_NORMAL(3) +#define YT9215_IO_LEVEL_RGMII1_M GENMASK(3, 2) +#define YT9215_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII1_M, (x)) +#define YT9215_IO_LEVEL_RGMII1_3V3 YT9215_IO_LEVEL_RGMII1(0) +#define YT9215_IO_LEVEL_RGMII1_2V5 YT9215_IO_LEVEL_RGMII1(1) +#define YT9215_IO_LEVEL_RGMII1_1V8 YT9215_IO_LEVEL_RGMII1(2) +#define YT9215_IO_LEVEL_RGMII0_M GENMASK(1, 0) +#define YT9215_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII0_M, (x)) +#define YT9215_IO_LEVEL_RGMII0_3V3 YT9215_IO_LEVEL_RGMII0(0) +#define YT9215_IO_LEVEL_RGMII0_2V5 YT9215_IO_LEVEL_RGMII0(1) +#define YT9215_IO_LEVEL_RGMII0_1V8 YT9215_IO_LEVEL_RGMII0(2) +#define YT9218_IO_LEVEL_RGMII1_M GENMASK(5, 4) +#define YT9218_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII1_M, (x)) +#define YT9218_IO_LEVEL_RGMII1_3V3 YT9218_IO_LEVEL_RGMII1(0) +#define YT9218_IO_LEVEL_RGMII1_2V5 YT9218_IO_LEVEL_RGMII1(1) +#define YT9218_IO_LEVEL_RGMII1_1V8 YT9218_IO_LEVEL_RGMII1(2) +#define YT9218_IO_LEVEL_RGMII0_M GENMASK(3, 2) +#define YT9218_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII0_M, (x)) +#define YT9218_IO_LEVEL_RGMII0_3V3 YT9218_IO_LEVEL_RGMII0(0) +#define YT9218_IO_LEVEL_RGMII0_2V5 YT9218_IO_LEVEL_RGMII0(1) +#define YT9218_IO_LEVEL_RGMII0_1V8 YT9218_IO_LEVEL_RGMII0(2) +#define YT9218_IO_LEVEL_NORMAL_M GENMASK(1, 0) +#define YT9218_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9218_IO_LEVEL_NORMAL_M, (x)) +#define YT9218_IO_LEVEL_NORMAL_3V3 YT9218_IO_LEVEL_NORMAL(0) +#define YT9218_IO_LEVEL_NORMAL_1V8 YT9218_IO_LEVEL_NORMAL(3) +#define YT921X_MAC_ADDR_HI2 0x80080 +#define YT921X_MAC_ADDR_LO4 0x80084 +#define YT921X_SERDESn(port) (0x8008c + 4 * ((port) - 8)) +#define YT921X_SERDES_MODE_M GENMASK(9, 7) +#define YT921X_SERDES_MODE(x) FIELD_PREP(YT921X_SERDES_MODE_M, (x)) +#define YT921X_SERDES_MODE_SGMII YT921X_SERDES_MODE(0) +#define YT921X_SERDES_MODE_REVSGMII YT921X_SERDES_MODE(1) +#define YT921X_SERDES_MODE_1000BASEX YT921X_SERDES_MODE(2) +#define YT921X_SERDES_MODE_100BASEX YT921X_SERDES_MODE(3) +#define YT921X_SERDES_MODE_2500BASEX YT921X_SERDES_MODE(4) +#define YT921X_SERDES_RX_PAUSE BIT(6) +#define YT921X_SERDES_TX_PAUSE BIT(5) +#define YT921X_SERDES_LINK BIT(4) /* force link */ +#define YT921X_SERDES_DUPLEX_FULL BIT(3) +#define YT921X_SERDES_SPEED_M GENMASK(2, 0) +#define YT921X_SERDES_SPEED(x) FIELD_PREP(YT921X_SERDES_SPEED_M, (x)) +#define YT921X_SERDES_SPEED_10 YT921X_SERDES_SPEED(0) +#define YT921X_SERDES_SPEED_100 YT921X_SERDES_SPEED(1) +#define YT921X_SERDES_SPEED_1000 YT921X_SERDES_SPEED(2) +#define YT921X_SERDES_SPEED_10000 YT921X_SERDES_SPEED(3) +#define YT921X_SERDES_SPEED_2500 YT921X_SERDES_SPEED(4) +#define YT921X_PORTn_CTRL(port) (0x80100 + 4 * (port)) +#define YT921X_PORT_CTRL_PAUSE_AN BIT(10) +#define YT921X_PORTn_STATUS(port) (0x80200 + 4 * (port)) +#define YT921X_PORT_LINK BIT(9) /* CTRL: auto negotiation */ +#define YT921X_PORT_HALF_PAUSE BIT(8) /* Half-duplex back pressure mode */ +#define YT921X_PORT_DUPLEX_FULL BIT(7) +#define YT921X_PORT_RX_PAUSE BIT(6) 
+#define YT921X_PORT_TX_PAUSE BIT(5) +#define YT921X_PORT_RX_MAC_EN BIT(4) +#define YT921X_PORT_TX_MAC_EN BIT(3) +#define YT921X_PORT_SPEED_M GENMASK(2, 0) +#define YT921X_PORT_SPEED(x) FIELD_PREP(YT921X_PORT_SPEED_M, (x)) +#define YT921X_PORT_SPEED_10 YT921X_PORT_SPEED(0) +#define YT921X_PORT_SPEED_100 YT921X_PORT_SPEED(1) +#define YT921X_PORT_SPEED_1000 YT921X_PORT_SPEED(2) +#define YT921X_PORT_SPEED_10000 YT921X_PORT_SPEED(3) +#define YT921X_PORT_SPEED_2500 YT921X_PORT_SPEED(4) +#define YT921X_PON_STRAP_FUNC 0x80320 +#define YT921X_PON_STRAP_VAL 0x80324 +#define YT921X_PON_STRAP_CAP 0x80328 +#define YT921X_PON_STRAP_EEE BIT(16) +#define YT921X_PON_STRAP_LOOP_DETECT BIT(7) +#define YT921X_MDIO_POLLINGn(port) (0x80364 + 4 * ((port) - 8)) +#define YT921X_MDIO_POLLING_DUPLEX_FULL BIT(4) +#define YT921X_MDIO_POLLING_LINK BIT(3) +#define YT921X_MDIO_POLLING_SPEED_M GENMASK(2, 0) +#define YT921X_MDIO_POLLING_SPEED(x) FIELD_PREP(YT921X_MDIO_POLLING_SPEED_M, (x)) +#define YT921X_MDIO_POLLING_SPEED_10 YT921X_MDIO_POLLING_SPEED(0) +#define YT921X_MDIO_POLLING_SPEED_100 YT921X_MDIO_POLLING_SPEED(1) +#define YT921X_MDIO_POLLING_SPEED_1000 YT921X_MDIO_POLLING_SPEED(2) +#define YT921X_MDIO_POLLING_SPEED_10000 YT921X_MDIO_POLLING_SPEED(3) +#define YT921X_MDIO_POLLING_SPEED_2500 YT921X_MDIO_POLLING_SPEED(4) +#define YT921X_SENSOR 0x8036c +#define YT921X_SENSOR_TEMP BIT(18) +#define YT921X_TEMP 0x80374 +#define YT921X_CHIP_MODE 0x80388 +#define YT921X_CHIP_MODE_MODE GENMASK(1, 0) +#define YT921X_XMII_CTRL 0x80394 +#define YT921X_XMII_CTRL_PORTn(port) BIT(9 - (port)) /* Yes, it's reversed */ +#define YT921X_XMIIn(port) (0x80400 + 8 * ((port) - 8)) +#define YT921X_XMII_MODE_M GENMASK(31, 29) +#define YT921X_XMII_MODE(x) FIELD_PREP(YT921X_XMII_MODE_M, (x)) +#define YT921X_XMII_MODE_MII YT921X_XMII_MODE(0) +#define YT921X_XMII_MODE_REVMII YT921X_XMII_MODE(1) +#define YT921X_XMII_MODE_RMII YT921X_XMII_MODE(2) +#define YT921X_XMII_MODE_REVRMII YT921X_XMII_MODE(3) +#define YT921X_XMII_MODE_RGMII YT921X_XMII_MODE(4) +#define YT921X_XMII_MODE_DISABLE YT921X_XMII_MODE(5) +#define YT921X_XMII_LINK BIT(19) /* force link */ +#define YT921X_XMII_EN BIT(18) +#define YT921X_XMII_SOFT_RST BIT(17) +#define YT921X_XMII_RGMII_TX_DELAY_150PS_M GENMASK(16, 13) +#define YT921X_XMII_RGMII_TX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_TX_DELAY_150PS_M, (x)) +#define YT921X_XMII_TX_CLK_IN BIT(11) +#define YT921X_XMII_RX_CLK_IN BIT(10) +#define YT921X_XMII_RGMII_TX_DELAY_2NS BIT(8) +#define YT921X_XMII_RGMII_TX_CLK_OUT BIT(7) +#define YT921X_XMII_RGMII_RX_DELAY_150PS_M GENMASK(6, 3) +#define YT921X_XMII_RGMII_RX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_RX_DELAY_150PS_M, (x)) +#define YT921X_XMII_RMII_PHY_TX_CLK_OUT BIT(2) +#define YT921X_XMII_REVMII_TX_CLK_OUT BIT(1) +#define YT921X_XMII_REVMII_RX_CLK_OUT BIT(0) + +#define YT921X_MACn_FRAME(port) (0x81008 + 0x1000 * (port)) +#define YT921X_MAC_FRAME_SIZE_M GENMASK(21, 8) +#define YT921X_MAC_FRAME_SIZE(x) FIELD_PREP(YT921X_MAC_FRAME_SIZE_M, (x)) + +#define YT921X_EEEn_VAL(port) (0xa0000 + 0x40 * (port)) +#define YT921X_EEE_VAL_DATA BIT(1) + +#define YT921X_EEE_CTRL 0xb0000 +#define YT921X_EEE_CTRL_ENn(port) BIT(port) + +#define YT921X_MIB_CTRL 0xc0004 +#define YT921X_MIB_CTRL_CLEAN BIT(30) +#define YT921X_MIB_CTRL_PORT_M GENMASK(6, 3) +#define YT921X_MIB_CTRL_PORT(x) FIELD_PREP(YT921X_MIB_CTRL_PORT_M, (x)) +#define YT921X_MIB_CTRL_ONE_PORT BIT(1) +#define YT921X_MIB_CTRL_ALL_PORT BIT(0) +#define YT921X_MIBn_DATA0(port) (0xc0100 + 0x100 * (port)) +#define YT921X_MIBn_DATAm(port, 
x) (YT921X_MIBn_DATA0(port) + 4 * (x)) + +#define YT921X_EDATA_CTRL 0xe0000 +#define YT921X_EDATA_CTRL_ADDR_M GENMASK(15, 8) +#define YT921X_EDATA_CTRL_ADDR(x) FIELD_PREP(YT921X_EDATA_CTRL_ADDR_M, (x)) +#define YT921X_EDATA_CTRL_OP_M GENMASK(3, 0) +#define YT921X_EDATA_CTRL_OP(x) FIELD_PREP(YT921X_EDATA_CTRL_OP_M, (x)) +#define YT921X_EDATA_CTRL_READ YT921X_EDATA_CTRL_OP(5) +#define YT921X_EDATA_DATA 0xe0004 +#define YT921X_EDATA_DATA_DATA_M GENMASK(31, 24) +#define YT921X_EDATA_DATA_STATUS_M GENMASK(3, 0) +#define YT921X_EDATA_DATA_STATUS(x) FIELD_PREP(YT921X_EDATA_DATA_STATUS_M, (x)) +#define YT921X_EDATA_DATA_IDLE YT921X_EDATA_DATA_STATUS(3) + +#define YT921X_EXT_MBUS_OP 0x6a000 +#define YT921X_INT_MBUS_OP 0xf0000 +#define YT921X_MBUS_OP_START BIT(0) +#define YT921X_EXT_MBUS_CTRL 0x6a004 +#define YT921X_INT_MBUS_CTRL 0xf0004 +#define YT921X_MBUS_CTRL_PORT_M GENMASK(25, 21) +#define YT921X_MBUS_CTRL_PORT(x) FIELD_PREP(YT921X_MBUS_CTRL_PORT_M, (x)) +#define YT921X_MBUS_CTRL_REG_M GENMASK(20, 16) +#define YT921X_MBUS_CTRL_REG(x) FIELD_PREP(YT921X_MBUS_CTRL_REG_M, (x)) +#define YT921X_MBUS_CTRL_TYPE_M GENMASK(11, 8) /* wild guess */ +#define YT921X_MBUS_CTRL_TYPE(x) FIELD_PREP(YT921X_MBUS_CTRL_TYPE_M, (x)) +#define YT921X_MBUS_CTRL_TYPE_C22 YT921X_MBUS_CTRL_TYPE(4) +#define YT921X_MBUS_CTRL_OP_M GENMASK(3, 2) /* wild guess */ +#define YT921X_MBUS_CTRL_OP(x) FIELD_PREP(YT921X_MBUS_CTRL_OP_M, (x)) +#define YT921X_MBUS_CTRL_WRITE YT921X_MBUS_CTRL_OP(1) +#define YT921X_MBUS_CTRL_READ YT921X_MBUS_CTRL_OP(2) +#define YT921X_EXT_MBUS_DOUT 0x6a008 +#define YT921X_INT_MBUS_DOUT 0xf0008 +#define YT921X_EXT_MBUS_DIN 0x6a00c +#define YT921X_INT_MBUS_DIN 0xf000c + +#define YT921X_PORTn_EGR(port) (0x100000 + 4 * (port)) +#define YT921X_PORT_EGR_TPID_CTAG_M GENMASK(5, 4) +#define YT921X_PORT_EGR_TPID_CTAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_CTAG_M, (x)) +#define YT921X_PORT_EGR_TPID_STAG_M GENMASK(3, 2) +#define YT921X_PORT_EGR_TPID_STAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_STAG_M, (x)) +#define YT921X_TPID_EGRn(x) (0x100300 + 4 * (x)) /* [0, 3] */ +#define YT921X_TPID_EGR_TPID_M GENMASK(15, 0) + +#define YT921X_VLAN_IGR_FILTER 0x180280 +#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port) BIT((port) + 11) +#define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port) +#define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port)) +#define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port) +#define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port)) +#define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22) +#define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21) +#define YT921X_PORT_LEARN_VID_LEARN_EN BIT(20) +#define YT921X_PORT_LEARN_SUSPEND_COPY_EN BIT(19) +#define YT921X_PORT_LEARN_SUSPEND_DROP_EN BIT(18) +#define YT921X_PORT_LEARN_DIS BIT(17) +#define YT921X_PORT_LEARN_LIMIT_EN BIT(16) +#define YT921X_PORT_LEARN_LIMIT_M GENMASK(15, 8) +#define YT921X_PORT_LEARN_LIMIT(x) FIELD_PREP(YT921X_PORT_LEARN_LIMIT_M, (x)) +#define YT921X_PORT_LEARN_DROP_ON_EXCEEDED BIT(2) +#define YT921X_PORT_LEARN_MODE_M GENMASK(1, 0) +#define YT921X_PORT_LEARN_MODE(x) FIELD_PREP(YT921X_PORT_LEARN_MODE_M, (x)) +#define YT921X_PORT_LEARN_MODE_AUTO YT921X_PORT_LEARN_MODE(0) +#define YT921X_PORT_LEARN_MODE_AUTO_AND_COPY YT921X_PORT_LEARN_MODE(1) +#define YT921X_PORT_LEARN_MODE_CPU_CONTROL YT921X_PORT_LEARN_MODE(2) +#define YT921X_AGEING 0x180440 +#define YT921X_AGEING_INTERVAL_M GENMASK(15, 0) +#define YT921X_FDB_IN0 0x180454 +#define YT921X_FDB_IN1 0x180458 +#define YT921X_FDB_IN2 0x18045c +#define YT921X_FDB_OP 0x180460 +#define YT921X_FDB_OP_INDEX_M 
GENMASK(22, 11) +#define YT921X_FDB_OP_INDEX(x) FIELD_PREP(YT921X_FDB_OP_INDEX_M, (x)) +#define YT921X_FDB_OP_MODE_INDEX BIT(10) /* mac+fid / index */ +#define YT921X_FDB_OP_FLUSH_MCAST BIT(9) /* ucast / mcast */ +#define YT921X_FDB_OP_FLUSH_M GENMASK(8, 7) +#define YT921X_FDB_OP_FLUSH(x) FIELD_PREP(YT921X_FDB_OP_FLUSH_M, (x)) +#define YT921X_FDB_OP_FLUSH_ALL YT921X_FDB_OP_FLUSH(0) +#define YT921X_FDB_OP_FLUSH_PORT YT921X_FDB_OP_FLUSH(1) +#define YT921X_FDB_OP_FLUSH_PORT_VID YT921X_FDB_OP_FLUSH(2) +#define YT921X_FDB_OP_FLUSH_VID YT921X_FDB_OP_FLUSH(3) +#define YT921X_FDB_OP_FLUSH_STATIC BIT(6) +#define YT921X_FDB_OP_NEXT_TYPE_M GENMASK(5, 4) +#define YT921X_FDB_OP_NEXT_TYPE(x) FIELD_PREP(YT921X_FDB_OP_NEXT_TYPE_M, (x)) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT YT921X_FDB_OP_NEXT_TYPE(0) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST_VID YT921X_FDB_OP_NEXT_TYPE(1) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST YT921X_FDB_OP_NEXT_TYPE(2) +#define YT921X_FDB_OP_NEXT_TYPE_MCAST YT921X_FDB_OP_NEXT_TYPE(3) +#define YT921X_FDB_OP_OP_M GENMASK(3, 1) +#define YT921X_FDB_OP_OP(x) FIELD_PREP(YT921X_FDB_OP_OP_M, (x)) +#define YT921X_FDB_OP_OP_ADD YT921X_FDB_OP_OP(0) +#define YT921X_FDB_OP_OP_DEL YT921X_FDB_OP_OP(1) +#define YT921X_FDB_OP_OP_GET_ONE YT921X_FDB_OP_OP(2) +#define YT921X_FDB_OP_OP_GET_NEXT YT921X_FDB_OP_OP(3) +#define YT921X_FDB_OP_OP_FLUSH YT921X_FDB_OP_OP(4) +#define YT921X_FDB_OP_START BIT(0) +#define YT921X_FDB_RESULT 0x180464 +#define YT921X_FDB_RESULT_DONE BIT(15) +#define YT921X_FDB_RESULT_NOTFOUND BIT(14) +#define YT921X_FDB_RESULT_OVERWRITED BIT(13) +#define YT921X_FDB_RESULT_INDEX_M GENMASK(11, 0) +#define YT921X_FDB_RESULT_INDEX(x) FIELD_PREP(YT921X_FDB_RESULT_INDEX_M, (x)) +#define YT921X_FDB_OUT0 0x1804b0 +#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0) +#define YT921X_FDB_OUT1 0x1804b4 +#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31) +#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28) +#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x)) +#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0) +#define YT921X_FDB_IO1_STATUS_MIN_TIME YT921X_FDB_IO1_STATUS(1) +#define YT921X_FDB_IO1_STATUS_MOVE_AGING_MAX_TIME YT921X_FDB_IO1_STATUS(3) +#define YT921X_FDB_IO1_STATUS_MAX_TIME YT921X_FDB_IO1_STATUS(5) +#define YT921X_FDB_IO1_STATUS_PENDING YT921X_FDB_IO1_STATUS(6) +#define YT921X_FDB_IO1_STATUS_STATIC YT921X_FDB_IO1_STATUS(7) +#define YT921X_FDB_IO1_FID_M GENMASK(27, 16) /* filtering ID (VID) */ +#define YT921X_FDB_IO1_FID(x) FIELD_PREP(YT921X_FDB_IO1_FID_M, (x)) +#define YT921X_FDB_IO1_ADDR_LO2_M GENMASK(15, 0) +#define YT921X_FDB_OUT2 0x1804b8 +#define YT921X_FDB_IO2_MOVE_AGING_STATUS_M GENMASK(31, 30) +#define YT921X_FDB_IO2_IGR_DROP BIT(29) +#define YT921X_FDB_IO2_EGR_PORTS_M GENMASK(28, 18) +#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x)) +#define YT921X_FDB_IO2_EGR_DROP BIT(17) +#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16) +#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15) +#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12) +#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x)) +#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0) +#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x)) +#define YT921X_FILTER_UNK_UCAST 0x180508 +#define YT921X_FILTER_UNK_MCAST 0x18050c +#define YT921X_FILTER_MCAST 0x180510 +#define YT921X_FILTER_BCAST 0x180514 +#define YT921X_FILTER_PORTS_M GENMASK(10, 0) +#define YT921X_FILTER_PORTS(x) FIELD_PREP(YT921X_FILTER_PORTS_M, (x)) +#define YT921X_FILTER_PORTn(port) BIT(port) 
+#define YT921X_VLAN_EGR_FILTER 0x180598 +#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port) +#define YT921X_CPU_COPY 0x180690 +#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2) +#define YT921X_CPU_COPY_TO_INT_CPU BIT(1) +#define YT921X_CPU_COPY_TO_EXT_CPU BIT(0) +#define YT921X_ACT_UNK_UCAST 0x180734 +#define YT921X_ACT_UNK_MCAST 0x180738 +#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_RMA BIT(23) +#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_IGMP BIT(22) +#define YT921X_ACT_UNK_ACTn_M(port) GENMASK(2 * (port) + 1, 2 * (port)) +#define YT921X_ACT_UNK_ACTn(port, x) ((x) << (2 * (port))) +#define YT921X_ACT_UNK_ACTn_FORWARD(port) YT921X_ACT_UNK_ACTn(port, 0) /* flood */ +#define YT921X_ACT_UNK_ACTn_TRAP(port) YT921X_ACT_UNK_ACTn(port, 1) /* steer to CPU */ +#define YT921X_ACT_UNK_ACTn_DROP(port) YT921X_ACT_UNK_ACTn(port, 2) /* discard */ +/* NEVER use this action; see comments in the tag driver */ +#define YT921X_ACT_UNK_ACTn_COPY(port) YT921X_ACT_UNK_ACTn(port, 3) /* flood and copy */ +#define YT921X_FDB_HW_FLUSH 0x180958 +#define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0) + +#define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan)) +#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK(50, 40) +#define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x)) +#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT((port) + 40) +#define YT921X_VLAN_CTRL_STP_ID_M GENMASK(39, 36) +#define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x)) +#define YT921X_VLAN_CTRL_SVLAN_EN BIT(35) +#define YT921X_VLAN_CTRL_FID_M GENMASK(34, 23) +#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x)) +#define YT921X_VLAN_CTRL_LEARN_DIS BIT(22) +#define YT921X_VLAN_CTRL_INT_PRI_EN BIT(21) +#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK(20, 18) +#define YT921X_VLAN_CTRL_PORTS_M GENMASK(17, 7) +#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x)) +#define YT921X_VLAN_CTRL_PORTn(port) BIT((port) + 7) +#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT(6) +#define YT921X_VLAN_CTRL_METER_EN BIT(5) +#define YT921X_VLAN_CTRL_METER_ID_M GENMASK(4, 0) + +#define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */ +#define YT921X_TPID_IGR_TPID_M GENMASK(15, 0) +#define YT921X_PORTn_IGR_TPID(port) (0x210010 + 4 * (port)) +#define YT921X_PORT_IGR_TPIDn_STAG_M GENMASK(7, 4) +#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4) +#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0) +#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x) + +#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port)) +#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31) +#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30) +#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18) +#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x)) +#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6) +#define YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x)) +#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3) +#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0) +#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port)) +#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8) +#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4) +#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_TAGGED BIT(3) +#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_UNTAGGED BIT(2) +#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED BIT(1) +#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED BIT(0) + +#define YT921X_MIRROR 0x300300 +#define YT921X_MIRROR_IGR_PORTS_M GENMASK(26, 16) +#define 
YT921X_MIRROR_IGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_IGR_PORTS_M, (x)) +#define YT921X_MIRROR_IGR_PORTn(port) BIT((port) + 16) +#define YT921X_MIRROR_EGR_PORTS_M GENMASK(14, 4) +#define YT921X_MIRROR_EGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_EGR_PORTS_M, (x)) +#define YT921X_MIRROR_EGR_PORTn(port) BIT((port) + 4) +#define YT921X_MIRROR_PORT_M GENMASK(3, 0) +#define YT921X_MIRROR_PORT(x) FIELD_PREP(YT921X_MIRROR_PORT_M, (x)) + +#define YT921X_EDATA_EXTMODE 0xfb +#define YT921X_EDATA_LEN 0x100 + +#define YT921X_FDB_NUM 4096 + +enum yt921x_fdb_entry_status { + YT921X_FDB_ENTRY_STATUS_INVALID = 0, + YT921X_FDB_ENTRY_STATUS_MIN_TIME = 1, + YT921X_FDB_ENTRY_STATUS_MOVE_AGING_MAX_TIME = 3, + YT921X_FDB_ENTRY_STATUS_MAX_TIME = 5, + YT921X_FDB_ENTRY_STATUS_PENDING = 6, + YT921X_FDB_ENTRY_STATUS_STATIC = 7, +}; + +#define YT9215_MAJOR 0x9002 +#define YT9218_MAJOR 0x9001 + +/* required for a hard reset */ +#define YT921X_RST_DELAY_US 10000 + +#define YT921X_FRAME_SIZE_MAX 0x2400 /* 9216 */ + +#define YT921X_TAG_LEN 8 + +/* 8 internal + 2 external + 1 mcu */ +#define YT921X_PORT_NUM 11 + +#define yt921x_port_is_internal(port) ((port) < 8) +#define yt921x_port_is_external(port) (8 <= (port) && (port) < 9) + +struct yt921x_mib { + u64 rx_broadcast; + u64 rx_pause; + u64 rx_multicast; + u64 rx_crc_errors; + + u64 rx_alignment_errors; + u64 rx_undersize_errors; + u64 rx_fragment_errors; + u64 rx_64byte; + + u64 rx_65_127byte; + u64 rx_128_255byte; + u64 rx_256_511byte; + u64 rx_512_1023byte; + + u64 rx_1024_1518byte; + u64 rx_jumbo; + u64 rx_good_bytes; + + u64 rx_bad_bytes; + u64 rx_oversize_errors; + + u64 rx_dropped; + u64 tx_broadcast; + u64 tx_pause; + u64 tx_multicast; + + u64 tx_undersize_errors; + u64 tx_64byte; + u64 tx_65_127byte; + u64 tx_128_255byte; + + u64 tx_256_511byte; + u64 tx_512_1023byte; + u64 tx_1024_1518byte; + u64 tx_jumbo; + + u64 tx_good_bytes; + u64 tx_collisions; + + u64 tx_aborted_errors; + u64 tx_multiple_collisions; + u64 tx_single_collisions; + u64 tx_good; + + u64 tx_deferred; + u64 tx_late_collisions; + u64 rx_oam; + u64 tx_oam; +}; + +struct yt921x_port { + unsigned char index; + + bool hairpin; + bool isolated; + + struct delayed_work mib_read; + struct yt921x_mib mib; + u64 rx_frames; + u64 tx_frames; +}; + +struct yt921x_reg_ops { + int (*read)(void *context, u32 reg, u32 *valp); + int (*write)(void *context, u32 reg, u32 val); +}; + +struct yt921x_priv { + struct dsa_switch ds; + + const struct yt921x_info *info; + /* cache of dsa_cpu_ports(ds) */ + u16 cpu_ports_mask; + + /* protect the access to the switch registers */ + struct mutex reg_lock; + const struct yt921x_reg_ops *reg_ops; + void *reg_ctx; + + /* mdio master bus */ + struct mii_bus *mbus_int; + struct mii_bus *mbus_ext; + + struct yt921x_port ports[YT921X_PORT_NUM]; + + u16 eee_ports_mask; +}; + +#endif diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index ecdea58e6a21..2227c83a4862 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1547,9 +1547,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_msglevel = netdev_set_msglevel, }; - #ifdef MODULE -void cleanup_module(void) +static void __exit corkscrew_exit_module(void) { while (!list_empty(&root_corkscrew_dev)) { struct net_device *dev; @@ -1563,4 +1562,5 @@ void cleanup_module(void) free_netdev(dev); } } +module_exit(corkscrew_exit_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index aead145dd91d..4a1b368ca7e6 100644 
--- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -129,6 +129,7 @@ source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" +source "drivers/net/ethernet/mucse/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 998dd628b202..2e18df8ca8ec 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ +obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 433a646e9831..75893c90a0a1 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -137,11 +137,11 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth) for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) airoha_fe_set(eth, REG_GDM_FWD_CFG(p), - GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | - GDM_DROP_CRC_ERR); + GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK | + GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK); - airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, - FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); + airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK, + FIELD_PREP(CDM_VLAN_MASK, 0x8100)); airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); } @@ -297,8 +297,11 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth) int q; all_rsv = airoha_fe_get_pse_all_rsv(eth); - /* hw misses PPE2 oq rsv */ - all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; + if (airoha_ppe_is_enabled(eth, 1)) { + /* hw misses PPE2 oq rsv */ + all_rsv += PSE_RSV_PAGES * + pse_port_num_queues[FE_PSE_PORT_PPE2]; + } airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); /* CMD1 */ @@ -335,13 +338,17 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth) for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, PSE_QUEUE_RSV_PAGES); - /* PPE2 */ - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { - if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, - PSE_QUEUE_RSV_PAGES); - else - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); + if (airoha_ppe_is_enabled(eth, 1)) { + /* PPE2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, + q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, + q, 0); + } } /* GMD4 */ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) @@ -396,46 +403,46 @@ static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) { /* CDM1_CRSN_QSEL */ - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 
>> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24), CDM_CRSN_QSEL_Q6)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25), CDM_CRSN_QSEL_Q1)); /* CDM2_CRSN_QSEL */ - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24), CDM_CRSN_QSEL_Q6)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25), CDM_CRSN_QSEL_Q1)); } @@ -455,18 +462,18 @@ static int airoha_fe_init(struct airoha_eth *eth) airoha_fe_wr(eth, REG_FE_PCE_CFG, PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); /* set vip queue selection to ring 1 */ - airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, - FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, - FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK, + FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK, + FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4)); /* set GDM4 source interface offset to 8 */ - airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, - GDM4_SPORT_OFF2_MASK | - GDM4_SPORT_OFF1_MASK | - GDM4_SPORT_OFF0_MASK, - FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | - 
FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | - FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); + airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4), + GDM_SPORT_OFF2_MASK | + GDM_SPORT_OFF1_MASK | + GDM_SPORT_OFF0_MASK, + FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) | + FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) | + FIELD_PREP(GDM_SPORT_OFF0_MASK, 8)); /* set PSE Page as 128B */ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, @@ -492,8 +499,8 @@ static int airoha_fe_init(struct airoha_eth *eth) airoha_fe_set(eth, REG_GDM_MISC_CFG, GDM2_RDM_ACK_WAIT_PREF_MASK | GDM2_CHN_VLD_MODE_MASK); - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, - FIELD_PREP(CDM2_OAM_QSEL_MASK, 15)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK, + FIELD_PREP(CDM_OAM_QSEL_MASK, 15)); /* init fragment and assemble Force Port */ /* NPU Core-3, NPU Bridge Channel-3 */ @@ -507,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth) FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); - airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); - airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK); airoha_fe_crsn_qsel_init(eth); @@ -516,7 +523,7 @@ static int airoha_fe_init(struct airoha_eth *eth) airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); /* default aging mode for mbi unlock issue */ - airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, + airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2), MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); @@ -524,25 +531,6 @@ static int airoha_fe_init(struct airoha_eth *eth) /* disable IFC by default */ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1)); - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1), - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2)); - /* enable 1:N vlan action, init vlan table */ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); @@ -904,19 +892,13 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, DMA_TO_DEVICE); - memset(e, 0, sizeof(*e)); + e->dma_addr = 0; + list_add_tail(&e->list, &q->tx_list); + WRITE_ONCE(desc->msg0, 0); WRITE_ONCE(desc->msg1, 0); q->queued--; - /* completion ring can report out-of-order indexes if hw QoS - * is enabled and packets with different priority are queued - * to same DMA ring. 
Take into account possible out-of-order - * reports incrementing DMA ring tail pointer - */ - while (q->tail != q->head && !q->entry[q->tail].dma_addr) - q->tail = (q->tail + 1) % q->ndesc; - if (skb) { u16 queue = skb_get_queue_mapping(skb); struct netdev_queue *txq; @@ -961,6 +943,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, q->ndesc = size; q->qdma = qdma; q->free_thr = 1 + MAX_SKB_FRAGS; + INIT_LIST_HEAD(&q->tx_list); q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), GFP_KERNEL); @@ -973,9 +956,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, return -ENOMEM; for (i = 0; i < q->ndesc; i++) { - u32 val; + u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); - val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); + list_add_tail(&q->entry[i].list, &q->tx_list); WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); } @@ -985,9 +968,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_CPU_IDX_MASK, 0)); airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, - FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_DMA_IDX_MASK, 0)); return 0; } @@ -1043,17 +1026,21 @@ static int airoha_qdma_init_tx(struct airoha_qdma *qdma) static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) { struct airoha_eth *eth = q->qdma->eth; + int i; spin_lock_bh(&q->lock); - while (q->queued) { - struct airoha_queue_entry *e = &q->entry[q->tail]; + for (i = 0; i < q->ndesc; i++) { + struct airoha_queue_entry *e = &q->entry[i]; + + if (!e->dma_addr) + continue; dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, DMA_TO_DEVICE); dev_kfree_skb_any(e->skb); + e->dma_addr = 0; e->skb = NULL; - - q->tail = (q->tail + 1) % q->ndesc; + list_add_tail(&e->list, &q->tx_list); q->queued--; } spin_unlock_bh(&q->lock); @@ -1387,8 +1374,7 @@ static int airoha_hw_init(struct platform_device *pdev, int err, i; /* disable xsi */ - err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), - eth->xsi_rsts); + err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts); if (err) return err; @@ -1695,19 +1681,23 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p) return 0; } -static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) +static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) { - u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; struct airoha_eth *eth = port->qdma->eth; - u32 chan = port->id == 3 ? 4 : 0; + u32 val, pse_port, chan, nbq; + int src_port; /* Forward the traffic to the proper GDM port */ + pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3 + : FE_PSE_PORT_GDM4; airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); - airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); + airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK); /* Enable GDM2 loopback */ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); + + chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 
4 : 3 : 0; airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, FIELD_PREP(LPBK_CHAN_MASK, chan) | @@ -1722,36 +1712,36 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); - if (port->id == 3) { - /* FIXME: handle XSI_PCE1_PORT */ - airoha_fe_rmw(eth, REG_FE_WAN_PORT, - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, - FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); - airoha_fe_rmw(eth, - REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), - SP_CPORT_PCIE0_MASK, - FIELD_PREP(SP_CPORT_PCIE0_MASK, - FE_PSE_PORT_CDM2)); - } else { - /* FIXME: handle XSI_USB_PORT */ + /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */ + nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0; + src_port = eth->soc->ops.get_src_port_id(port, nbq); + if (src_port < 0) + return src_port; + + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, src_port)); + val = src_port & SP_CPORT_DFT_MASK; + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)), + SP_CPORT_MASK(val), + FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val))); + + if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth)) airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, FC_ID_OF_SRC_PORT24_MASK, FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); - airoha_fe_rmw(eth, REG_FE_WAN_PORT, - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, - FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); - airoha_fe_rmw(eth, - REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), - SP_CPORT_ETH_MASK, - FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); - } + + return 0; } static int airoha_dev_init(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); - struct airoha_eth *eth = port->qdma->eth; - u32 pse_port; + struct airoha_qdma *qdma = port->qdma; + struct airoha_eth *eth = qdma->eth; + u32 pse_port, fe_cpu_port; + u8 ppe_id; airoha_set_macaddr(port, dev->dev_addr); @@ -1759,18 +1749,37 @@ static int airoha_dev_init(struct net_device *dev) case 3: case 4: /* If GDM2 is active we can't enable loopback */ - if (!eth->ports[1]) - airhoha_set_gdm2_loopback(port); + if (!eth->ports[1]) { + int err; + + err = airhoha_set_gdm2_loopback(port); + if (err) + return err; + } fallthrough; case 2: - pse_port = FE_PSE_PORT_PPE2; - break; - default: + if (airoha_ppe_is_enabled(eth, 1)) { + /* For PPE2 always use secondary cpu port. */ + fe_cpu_port = FE_PSE_PORT_CDM2; + pse_port = FE_PSE_PORT_PPE2; + break; + } + fallthrough; + default: { + u8 qdma_id = qdma - ð->qdma[0]; + + /* For PPE1 select cpu port according to the running QDMA. */ + fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1; pse_port = FE_PSE_PORT_PPE1; break; } + } airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); + ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0; + airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id), + DFT_CPORT_MASK(port->id), + fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id))); return 0; } @@ -1873,18 +1882,20 @@ static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev) #endif } -static bool airoha_dev_tx_queue_busy(struct airoha_queue *q, u32 nr_frags) +static int airoha_get_fe_port(struct airoha_gdm_port *port) { - u32 tail = q->tail <= q->head ? 
q->tail + q->ndesc : q->tail; - u32 index = q->head + nr_frags; + struct airoha_qdma *qdma = port->qdma; + struct airoha_eth *eth = qdma->eth; - /* completion napi can free out-of-order tx descriptors if hw QoS is - * enabled and packets with different priorities are queued to the same - * DMA ring. Take into account possible out-of-order reports checking - * if the tx queue is full using circular buffer head/tail pointers - * instead of the number of queued packets. - */ - return index >= tail; + switch (eth->soc->version) { + case 0x7583: + return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3 + : port->id; + case 0x7581: + default: + return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4 + : port->id; + } } static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, @@ -1893,8 +1904,10 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_qdma *qdma = port->qdma; u32 nr_frags, tag, msg0, msg1, len; + struct airoha_queue_entry *e; struct netdev_queue *txq; struct airoha_queue *q; + LIST_HEAD(tx_list); void *data; int i, qid; u16 index; @@ -1927,7 +1940,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, } } - fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; + fport = airoha_get_fe_port(port); msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); @@ -1940,7 +1953,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, txq = netdev_get_tx_queue(dev, qid); nr_frags = 1 + skb_shinfo(skb)->nr_frags; - if (airoha_dev_tx_queue_busy(q, nr_frags)) { + if (q->queued + nr_frags >= q->ndesc) { /* not enough space in the queue */ netif_tx_stop_queue(txq); spin_unlock_bh(&q->lock); @@ -1949,11 +1962,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, len = skb_headlen(skb); data = skb->data; - index = q->head; + + e = list_first_entry(&q->tx_list, struct airoha_queue_entry, + list); + index = e - q->entry; for (i = 0; i < nr_frags; i++) { struct airoha_qdma_desc *desc = &q->desc[index]; - struct airoha_queue_entry *e = &q->entry[index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr; u32 val; @@ -1963,7 +1978,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(dev->dev.parent, addr))) goto error_unmap; - index = (index + 1) % q->ndesc; + list_move_tail(&e->list, &tx_list); + e->skb = i ? NULL : skb; + e->dma_addr = addr; + e->dma_len = len; + + e = list_first_entry(&q->tx_list, struct airoha_queue_entry, + list); + index = e - q->entry; val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); if (i < nr_frags - 1) @@ -1976,15 +1998,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); - e->skb = i ? 
NULL : skb; - e->dma_addr = addr; - e->dma_len = len; - data = skb_frag_address(frag); len = skb_frag_size(frag); } - - q->head = index; q->queued += i; skb_tx_timestamp(skb); @@ -1993,7 +2009,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, if (netif_xmit_stopped(txq) || !netdev_xmit_more()) airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); if (q->ndesc - q->queued < q->free_thr) netif_tx_stop_queue(txq); @@ -2003,10 +2019,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; error_unmap: - for (i--; i >= 0; i--) { - index = (q->head + i) % q->ndesc; - dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, - q->entry[index].dma_len, DMA_TO_DEVICE); + while (!list_empty(&tx_list)) { + e = list_first_entry(&tx_list, struct airoha_queue_entry, + list); + dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + e->dma_addr = 0; + list_move_tail(&e->list, &q->tx_list); } spin_unlock_bh(&q->lock); @@ -2036,8 +2055,12 @@ static void airoha_ethtool_get_mac_stats(struct net_device *dev, airoha_update_hw_stats(port); do { start = u64_stats_fetch_begin(&port->stats.syncp); + stats->FramesTransmittedOK = port->stats.tx_ok_pkts; + stats->OctetsTransmittedOK = port->stats.tx_ok_bytes; stats->MulticastFramesXmittedOK = port->stats.tx_multicast; stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; + stats->FramesReceivedOK = port->stats.rx_ok_pkts; + stats->OctetsReceivedOK = port->stats.rx_ok_bytes; stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; } while (u64_stats_fetch_retry(&port->stats.syncp, start)); } @@ -2780,6 +2803,7 @@ static const struct ethtool_ops airoha_ethtool_ops = { .get_drvinfo = airoha_ethtool_get_drvinfo, .get_eth_mac_stats = airoha_ethtool_get_mac_stats, .get_rmon_stats = airoha_ethtool_get_rmon_stats, + .get_link = ethtool_op_get_link, }; static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port) @@ -2917,6 +2941,7 @@ free_metadata_dst: static int airoha_probe(struct platform_device *pdev) { + struct reset_control_bulk_data *xsi_rsts; struct device_node *np; struct airoha_eth *eth; int i, err; @@ -2925,6 +2950,10 @@ static int airoha_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + eth->soc = of_device_get_match_data(&pdev->dev); + if (!eth->soc) + return -EINVAL; + eth->dev = &pdev->dev; err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); @@ -2949,13 +2978,18 @@ static int airoha_probe(struct platform_device *pdev) return err; } - eth->xsi_rsts[0].id = "xsi-mac"; - eth->xsi_rsts[1].id = "hsi0-mac"; - eth->xsi_rsts[2].id = "hsi1-mac"; - eth->xsi_rsts[3].id = "hsi-mac"; - eth->xsi_rsts[4].id = "xfp-mac"; + xsi_rsts = devm_kcalloc(eth->dev, + eth->soc->num_xsi_rsts, sizeof(*xsi_rsts), + GFP_KERNEL); + if (!xsi_rsts) + return -ENOMEM; + + eth->xsi_rsts = xsi_rsts; + for (i = 0; i < eth->soc->num_xsi_rsts; i++) + eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i]; + err = devm_reset_control_bulk_get_exclusive(eth->dev, - ARRAY_SIZE(eth->xsi_rsts), + eth->soc->num_xsi_rsts, eth->xsi_rsts); if (err) { dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); @@ -3043,8 +3077,90 @@ static void airoha_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); } +static const char * const en7581_xsi_rsts_names[] = { + "xsi-mac", + "hsi0-mac", + "hsi1-mac", + "hsi-mac", + "xfp-mac", +}; + +static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, 
int nbq) +{ + switch (port->id) { + case 3: + /* 7581 SoC supports PCIe serdes on GDM3 port */ + if (nbq == 4) + return HSGMII_LAN_7581_PCIE0_SRCPORT; + if (nbq == 5) + return HSGMII_LAN_7581_PCIE1_SRCPORT; + break; + case 4: + /* 7581 SoC supports eth and usb serdes on GDM4 port */ + if (!nbq) + return HSGMII_LAN_7581_ETH_SRCPORT; + if (nbq == 1) + return HSGMII_LAN_7581_USB_SRCPORT; + break; + default: + break; + } + + return -EINVAL; +} + +static const char * const an7583_xsi_rsts_names[] = { + "xsi-mac", + "hsi0-mac", + "hsi1-mac", + "xfp-mac", +}; + +static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq) +{ + switch (port->id) { + case 3: + /* 7583 SoC supports eth serdes on GDM3 port */ + if (!nbq) + return HSGMII_LAN_7583_ETH_SRCPORT; + break; + case 4: + /* 7583 SoC supports PCIe and USB serdes on GDM4 port */ + if (!nbq) + return HSGMII_LAN_7583_PCIE_SRCPORT; + if (nbq == 1) + return HSGMII_LAN_7583_USB_SRCPORT; + break; + default: + break; + } + + return -EINVAL; +} + +static const struct airoha_eth_soc_data en7581_soc_data = { + .version = 0x7581, + .xsi_rsts_names = en7581_xsi_rsts_names, + .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names), + .num_ppe = 2, + .ops = { + .get_src_port_id = airoha_en7581_get_src_port_id, + }, +}; + +static const struct airoha_eth_soc_data an7583_soc_data = { + .version = 0x7583, + .xsi_rsts_names = an7583_xsi_rsts_names, + .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names), + .num_ppe = 1, + .ops = { + .get_src_port_id = airoha_an7583_get_src_port_id, + }, +}; + static const struct of_device_id of_airoha_match[] = { - { .compatible = "airoha,en7581-eth" }, + { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data }, + { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_airoha_match); diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index cd13c1c1224f..fbbc58133364 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -21,7 +21,6 @@ #define AIROHA_MAX_NUM_IRQ_BANKS 4 #define AIROHA_MAX_DSA_PORTS 7 #define AIROHA_MAX_NUM_RSTS 3 -#define AIROHA_MAX_NUM_XSI_RSTS 5 #define AIROHA_MAX_MTU 9216 #define AIROHA_MAX_PACKET_SIZE 2048 #define AIROHA_NUM_QOS_CHANNELS 4 @@ -48,20 +47,9 @@ #define QDMA_METER_IDX(_n) ((_n) & 0xff) #define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) -#define PPE_NUM 2 -#define PPE1_SRAM_NUM_ENTRIES (8 * 1024) -#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) -#ifdef CONFIG_NET_AIROHA_FLOW_STATS -#define PPE1_STATS_NUM_ENTRIES (4 * 1024) -#else -#define PPE1_STATS_NUM_ENTRIES 0 -#endif /* CONFIG_NET_AIROHA_FLOW_STATS */ -#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) -#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) -#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) +#define PPE_SRAM_NUM_ENTRIES (8 * 1024) +#define PPE_STATS_NUM_ENTRIES (4 * 1024) #define PPE_DRAM_NUM_ENTRIES (16 * 1024) -#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) -#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) #define PPE_ENTRY_SIZE 80 #define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10)) @@ -79,10 +67,16 @@ enum { }; enum { - HSGMII_LAN_PCIE0_SRCPORT = 0x16, - HSGMII_LAN_PCIE1_SRCPORT, - HSGMII_LAN_ETH_SRCPORT, - HSGMII_LAN_USB_SRCPORT, + HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16, + HSGMII_LAN_7581_PCIE1_SRCPORT, + HSGMII_LAN_7581_ETH_SRCPORT, + HSGMII_LAN_7581_USB_SRCPORT, +}; + +enum 
{ + HSGMII_LAN_7583_ETH_SRCPORT = 0x16, + HSGMII_LAN_7583_PCIE_SRCPORT = 0x18, + HSGMII_LAN_7583_USB_SRCPORT, }; enum { @@ -111,6 +105,13 @@ enum { CRSN_25 = 0x19, }; +enum airoha_gdm_index { + AIROHA_GDM1_IDX = 1, + AIROHA_GDM2_IDX = 2, + AIROHA_GDM3_IDX = 3, + AIROHA_GDM4_IDX = 4, +}; + enum { FE_PSE_PORT_CDM1, FE_PSE_PORT_GDM1, @@ -168,7 +169,10 @@ enum trtcm_param { struct airoha_queue_entry { union { void *buf; - struct sk_buff *skb; + struct { + struct list_head list; + struct sk_buff *skb; + }; }; dma_addr_t dma_addr; u16 dma_len; @@ -192,6 +196,8 @@ struct airoha_queue { struct napi_struct napi; struct page_pool *page_pool; struct sk_buff *skb; + + struct list_head tx_list; }; struct airoha_tx_irq_queue { @@ -554,7 +560,7 @@ struct airoha_ppe { struct rhashtable l2_flows; struct hlist_head *foe_flow; - u16 foe_check_time[PPE_NUM_ENTRIES]; + u16 *foe_check_time; struct airoha_foe_stats *foe_stats; dma_addr_t foe_stats_dma; @@ -562,9 +568,21 @@ struct airoha_ppe { struct dentry *debugfs_dir; }; +struct airoha_eth_soc_data { + u16 version; + const char * const *xsi_rsts_names; + int num_xsi_rsts; + int num_ppe; + struct { + int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq); + } ops; +}; + struct airoha_eth { struct device *dev; + const struct airoha_eth_soc_data *soc; + unsigned long state; void __iomem *fe_regs; @@ -574,7 +592,7 @@ struct airoha_eth { struct rhashtable flow_table; struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; - struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; + struct reset_control_bulk_data *xsi_rsts; struct net_device *napi_dev; @@ -617,15 +635,27 @@ static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) return port->id == 1; } +static inline bool airoha_is_7581(struct airoha_eth *eth) +{ + return eth->soc->version == 0x7581; +} + +static inline bool airoha_is_7583(struct airoha_eth *eth) +{ + return eth->soc->version == 0x7583; +} + bool airoha_is_valid_gdm_port(struct airoha_eth *eth, struct airoha_gdm_port *port); +bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index); void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb, u16 hash, bool rx_wlan); int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data); int airoha_ppe_init(struct airoha_eth *eth); void airoha_ppe_deinit(struct airoha_eth *eth); void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port); +u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe); struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash); void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index 8c883f2b2d36..68b7f9684dc7 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -16,6 +16,8 @@ #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" #define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin" +#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin" +#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin" #define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000 #define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000 #define NPU_DUMP_SIZE 512 @@ -103,6 +105,16 @@ enum { QDMA_WAN_PON_XDSL, }; +struct airoha_npu_fw { + const char *name; + int max_size; +}; + +struct airoha_npu_soc_data { + struct airoha_npu_fw fw_rv32; + struct airoha_npu_fw fw_data; +}; + #define MBOX_MSG_FUNC_ID GENMASK(14, 11) #define 
MBOX_MSG_STATIC_BUF BIT(5) #define MBOX_MSG_STATUS GENMASK(4, 2) @@ -182,49 +194,53 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, return ret; } -static int airoha_npu_run_firmware(struct device *dev, void __iomem *base, - struct resource *res) +static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr, + const struct airoha_npu_fw *fw_info) { const struct firmware *fw; - void __iomem *addr; int ret; - ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev); + ret = request_firmware(&fw, fw_info->name, dev); if (ret) return ret == -ENOENT ? -EPROBE_DEFER : ret; - if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) { + if (fw->size > fw_info->max_size) { dev_err(dev, "%s: fw size too overlimit (%zu)\n", - NPU_EN7581_FIRMWARE_RV32, fw->size); + fw_info->name, fw->size); ret = -E2BIG; goto out; } - addr = devm_ioremap_resource(dev, res); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto out; - } - memcpy_toio(addr, fw->data, fw->size); +out: release_firmware(fw); - ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev); - if (ret) - return ret == -ENOENT ? -EPROBE_DEFER : ret; + return ret; +} - if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) { - dev_err(dev, "%s: fw size too overlimit (%zu)\n", - NPU_EN7581_FIRMWARE_DATA, fw->size); - ret = -E2BIG; - goto out; - } +static int airoha_npu_run_firmware(struct device *dev, void __iomem *base, + struct resource *res) +{ + const struct airoha_npu_soc_data *soc; + void __iomem *addr; + int ret; - memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size); -out: - release_firmware(fw); + soc = of_device_get_match_data(dev); + if (!soc) + return -EINVAL; - return ret; + addr = devm_ioremap_resource(dev, res); + if (IS_ERR(addr)) + return PTR_ERR(addr); + + /* Load rv32 npu firmware */ + ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32); + if (ret) + return ret; + + /* Load data npu firmware */ + return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM, + &soc->fw_data); } static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance) @@ -597,8 +613,31 @@ void airoha_npu_put(struct airoha_npu *npu) } EXPORT_SYMBOL_GPL(airoha_npu_put); +static const struct airoha_npu_soc_data en7581_npu_soc_data = { + .fw_rv32 = { + .name = NPU_EN7581_FIRMWARE_RV32, + .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE, + }, + .fw_data = { + .name = NPU_EN7581_FIRMWARE_DATA, + .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE, + }, +}; + +static const struct airoha_npu_soc_data an7583_npu_soc_data = { + .fw_rv32 = { + .name = NPU_AN7583_FIRMWARE_RV32, + .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE, + }, + .fw_data = { + .name = NPU_AN7583_FIRMWARE_DATA, + .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE, + }, +}; + static const struct of_device_id of_airoha_npu_match[] = { - { .compatible = "airoha,en7581-npu" }, + { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data }, + { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_airoha_npu_match); @@ -737,6 +776,8 @@ module_platform_driver(airoha_npu_driver); MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA); MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32); +MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA); +MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_DESCRIPTION("Airoha Network Processor Unit driver"); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index 
691361b25407..c373f21d95f5 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -32,9 +32,50 @@ static const struct rhashtable_params airoha_l2_flow_table_params = { .automatic_shrinking = true, }; -static bool airoha_ppe2_is_enabled(struct airoha_eth *eth) +static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe) { - return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK; + if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS)) + return -EOPNOTSUPP; + + if (airoha_is_7583(ppe->eth)) + return -EOPNOTSUPP; + + return PPE_STATS_NUM_ENTRIES; +} + +static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe) +{ + int num_stats = airoha_ppe_get_num_stats_entries(ppe); + + if (num_stats > 0) { + struct airoha_eth *eth = ppe->eth; + + num_stats = num_stats * eth->soc->num_ppe; + } + + return num_stats; +} + +static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe) +{ + struct airoha_eth *eth = ppe->eth; + + return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe; +} + +u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe) +{ + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + + return sram_num_entries + PPE_DRAM_NUM_ENTRIES; +} + +bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index) +{ + if (index >= eth->soc->num_ppe) + return false; + + return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK; } static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) @@ -46,14 +87,22 @@ static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) static void airoha_ppe_hw_init(struct airoha_ppe *ppe) { - u32 sram_tb_size, sram_num_entries, dram_num_entries; + u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries; + u32 sram_tb_size, dram_num_entries; struct airoha_eth *eth = ppe->eth; - int i; + int i, sram_num_stats_entries; - sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry); dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES); - for (i = 0; i < PPE_NUM; i++) { + sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe); + if (sram_num_stats_entries > 0) + sram_ppe_num_data_entries -= sram_num_stats_entries; + sram_ppe_num_data_entries = + PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries); + + for (i = 0; i < eth->soc->num_ppe; i++) { int p; airoha_fe_wr(eth, REG_PPE_TB_BASE(i), @@ -85,10 +134,16 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), PPE_TB_CFG_SEARCH_MISS_MASK | + PPE_SRAM_TB_NUM_ENTRY_MASK | + PPE_DRAM_TB_NUM_ENTRY_MASK | PPE_TB_CFG_KEEPALIVE_MASK | PPE_TB_ENTRY_SIZE_MASK, FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | - FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); + FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) | + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, + sram_ppe_num_data_entries) | + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, + dram_num_entries)); airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED); @@ -101,35 +156,6 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) FIELD_PREP(FP1_EGRESS_MTU_MASK, AIROHA_MAX_MTU)); } - - if (airoha_ppe2_is_enabled(eth)) { - sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - 
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(1), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - } else { - sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - } } static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth) @@ -428,9 +454,11 @@ static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe, return 0; } -static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) +static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe, + struct airoha_foe_entry *hwe) { int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; u32 hash, hv1, hv2, hv3; switch (type) { @@ -468,25 +496,31 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) case PPE_PKT_TYPE_IPV6_6RD: default: WARN_ON_ONCE(1); - return PPE_HASH_MASK; + return ppe_hash_mask; } hash = (hv1 & hv2) | ((~hv1) & hv3); hash = (hash >> 24) | ((hash & 0xffffff) << 8); hash ^= hv1 ^ hv2 ^ hv3; hash ^= hash >> 16; - hash &= PPE_NUM_ENTRIES - 1; + hash &= ppe_hash_mask; return hash; } -static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) +static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, + u32 hash, u32 *index) { - if (!airoha_ppe2_is_enabled(ppe->eth)) - return hash; + int ppe_num_stats_entries; + + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return ppe_num_stats_entries; - return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES - : hash; + *index = hash >= ppe_num_stats_entries ? 
hash - PPE_STATS_NUM_ENTRIES + : hash; + + return 0; } static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, @@ -500,9 +534,13 @@ static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, struct airoha_npu *npu) { - int i; + int i, ppe_num_stats_entries; - for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + for (i = 0; i < ppe_num_stats_entries; i++) airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); } @@ -513,10 +551,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, { int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); u32 index, pse_port, val, *data, *ib2, *meter; + int ppe_num_stats_entries; u8 nbq; - index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); - if (index >= PPE_STATS_NUM_ENTRIES) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) + return; + + if (index >= ppe_num_stats_entries) return; if (type == PPE_PKT_TYPE_BRIDGE) { @@ -557,17 +602,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, static struct airoha_foe_entry * airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) { + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + lockdep_assert_held(&ppe_lock); - if (hash < PPE_SRAM_NUM_ENTRIES) { + if (hash < sram_num_entries) { u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry); + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; struct airoha_eth *eth = ppe->eth; - bool ppe2; u32 val; int i; - ppe2 = airoha_ppe2_is_enabled(ppe->eth) && - hash >= PPE1_SRAM_NUM_ENTRIES; airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | PPE_SRAM_CTRL_REQ_MASK); @@ -577,7 +622,8 @@ airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) REG_PPE_RAM_CTRL(ppe2))) return NULL; - for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++) + for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe); + i++) hwe[i] = airoha_fe_rr(eth, REG_PPE_RAM_ENTRY(ppe2, i)); } @@ -614,10 +660,32 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e, return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1)); } +static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash) +{ + struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; + u32 *ptr = (u32 *)hwe, val; + int i; + + for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++) + airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]); + + wmb(); + airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), + FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | + PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK); + + return read_poll_timeout_atomic(airoha_fe_rr, val, + val & PPE_SRAM_CTRL_ACK_MASK, + 10, 100, false, ppe->eth, + REG_PPE_RAM_CTRL(ppe2)); +} + static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, struct airoha_foe_entry *e, u32 hash, bool rx_wlan) { + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); u32 ts = airoha_ppe_get_timestamp(ppe); struct airoha_eth *eth = ppe->eth; @@ -642,14 +710,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, if (!rx_wlan) airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, 
hash); - if (hash < PPE_SRAM_NUM_ENTRIES) { - dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); - bool ppe2 = airoha_ppe2_is_enabled(eth) && - hash >= PPE1_SRAM_NUM_ENTRIES; - - err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), - hash, ppe2); - } + if (hash < sram_num_entries) + err = airoha_ppe_foe_commit_sram_entry(ppe, hash); unlock: rcu_read_unlock(); @@ -772,7 +834,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, if (state == AIROHA_FOE_STATE_BIND) goto unlock; - index = airoha_ppe_foe_get_entry_hash(hwe); + index = airoha_ppe_foe_get_entry_hash(ppe, hwe); hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) { if (e->type == FLOW_TYPE_L2_SUBFLOW) { state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); @@ -832,7 +894,7 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, if (type == PPE_PKT_TYPE_BRIDGE) return airoha_ppe_foe_l2_flow_commit_entry(ppe, e); - hash = airoha_ppe_foe_get_entry_hash(&e->data); + hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data); e->type = FLOW_TYPE_L4; e->hash = 0xffff; @@ -1158,11 +1220,19 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth, void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, struct airoha_foe_stats64 *stats) { - u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); struct airoha_eth *eth = ppe->eth; + int ppe_num_stats_entries; struct airoha_npu *npu; + u32 index; - if (index >= PPE_STATS_NUM_ENTRIES) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) + return; + + if (index >= ppe_num_stats_entries) return; rcu_read_lock(); @@ -1225,20 +1295,21 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth, return -EOPNOTSUPP; } -static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, - struct airoha_npu *npu) +static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe) { - int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES; + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); struct airoha_foe_entry *hwe = ppe->foe; + int i, err = 0; - if (airoha_ppe2_is_enabled(ppe->eth)) - sram_num_entries = sram_num_entries / 2; + for (i = 0; i < sram_num_entries; i++) { - for (i = 0; i < sram_num_entries; i++) memset(&hwe[i], 0, sizeof(*hwe)); + err = airoha_ppe_foe_commit_sram_entry(ppe, i); + if (err) + break; + } - return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma, - PPE_SRAM_NUM_ENTRIES); + return err; } static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) @@ -1257,7 +1329,7 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) { struct airoha_npu *npu = airoha_ppe_npu_get(eth); struct airoha_ppe *ppe = eth->ppe; - int err; + int err, ppe_num_stats_entries; if (IS_ERR(npu)) return PTR_ERR(npu); @@ -1266,18 +1338,15 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) if (err) goto error_npu_put; - if (PPE_STATS_NUM_ENTRIES) { + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries > 0) { err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma, - PPE_STATS_NUM_ENTRIES); + ppe_num_stats_entries); if (err) goto error_npu_put; } airoha_ppe_hw_init(ppe); - err = airoha_ppe_flush_sram_entries(ppe, npu); - if (err) - goto error_npu_put; - airoha_ppe_foe_flow_stats_reset(ppe, npu); rcu_assign_pointer(eth->npu, npu); @@ -1313,9 +1382,10 @@ void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff 
*skb, u16 hash, bool rx_wlan) { struct airoha_ppe *ppe = dev->priv; + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; u16 now, diff; - if (hash > PPE_HASH_MASK) + if (hash > ppe_hash_mask) return; now = (u16)jiffies; @@ -1405,8 +1475,9 @@ EXPORT_SYMBOL_GPL(airoha_ppe_put_dev); int airoha_ppe_init(struct airoha_eth *eth) { + int foe_size, err, ppe_num_stats_entries; + u32 ppe_num_entries; struct airoha_ppe *ppe; - int foe_size, err; ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL); if (!ppe) @@ -1415,24 +1486,25 @@ int airoha_ppe_init(struct airoha_eth *eth) ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb; ppe->dev.ops.check_skb = airoha_ppe_check_skb; ppe->dev.priv = ppe; + ppe->eth = eth; + eth->ppe = ppe; - foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); + foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry); ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma, GFP_KERNEL); if (!ppe->foe) return -ENOMEM; - ppe->eth = eth; - eth->ppe = ppe; - ppe->foe_flow = devm_kzalloc(eth->dev, - PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow), + ppe_num_entries * sizeof(*ppe->foe_flow), GFP_KERNEL); if (!ppe->foe_flow) return -ENOMEM; - foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); - if (foe_size) { + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries > 0) { + foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats); ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_stats_dma, GFP_KERNEL); @@ -1440,6 +1512,15 @@ int airoha_ppe_init(struct airoha_eth *eth) return -ENOMEM; } + ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries * sizeof(*ppe->foe_check_time), + GFP_KERNEL); + if (!ppe->foe_check_time) + return -ENOMEM; + + err = airoha_ppe_flush_sram_entries(ppe); + if (err) + return err; + err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params); if (err) return err; diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c index 05a756233f6a..0112c41150bb 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c +++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c @@ -53,9 +53,10 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, [AIROHA_FOE_STATE_FIN] = "FIN", }; struct airoha_ppe *ppe = m->private; + u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); int i; - for (i = 0; i < PPE_NUM_ENTRIES; i++) { + for (i = 0; i < ppe_num_entries; i++) { const char *state_str, *type_str = "UNKNOWN"; void *src_addr = NULL, *dest_addr = NULL; u16 *src_port = NULL, *dest_port = NULL; diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h index 69c5a143db8c..ed4e3407f4a0 100644 --- a/drivers/net/ethernet/airoha/airoha_regs.h +++ b/drivers/net/ethernet/airoha/airoha_regs.h @@ -23,6 +23,8 @@ #define GDM3_BASE 0x1100 #define GDM4_BASE 0x2500 +#define CDM_BASE(_n) \ + ((_n) == 2 ? CDM2_BASE : CDM1_BASE) #define GDM_BASE(_n) \ ((_n) == 4 ? GDM4_BASE : \ (_n) == 3 ? 
GDM3_BASE : \ @@ -109,30 +111,24 @@ #define PATN_DP_MASK GENMASK(31, 16) #define PATN_SP_MASK GENMASK(15, 0) -#define REG_CDM1_VLAN_CTRL CDM1_BASE -#define CDM1_VLAN_MASK GENMASK(31, 16) +#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n) +#define CDM_VLAN_MASK GENMASK(31, 16) -#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) -#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) +#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08) +#define CDM_OAM_QSEL_MASK GENMASK(31, 27) +#define CDM_VIP_QSEL_MASK GENMASK(24, 20) -#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) -#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) - -#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) -#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) -#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) - -#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) -#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ +#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2)) +#define CDM_CRSN_QSEL_REASON_MASK(_n) \ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) #define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) -#define GDM_DROP_CRC_ERR BIT(23) -#define GDM_IP4_CKSUM BIT(22) -#define GDM_TCP_CKSUM BIT(21) -#define GDM_UDP_CKSUM BIT(20) -#define GDM_STRIP_CRC BIT(16) +#define GDM_PAD_EN_MASK BIT(28) +#define GDM_DROP_CRC_ERR_MASK BIT(23) +#define GDM_IP4_CKSUM_MASK BIT(22) +#define GDM_TCP_CKSUM_MASK BIT(21) +#define GDM_UDP_CKSUM_MASK BIT(20) +#define GDM_STRIP_CRC_MASK BIT(16) #define GDM_UCFQ_MASK GENMASK(15, 12) #define GDM_BCFQ_MASK GENMASK(11, 8) #define GDM_MCFQ_MASK GENMASK(7, 4) @@ -156,6 +152,10 @@ #define LBK_CHAN_MODE_MASK BIT(1) #define LPBK_EN_MASK BIT(0) +#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20) +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) + #define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24) #define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28) @@ -168,10 +168,10 @@ #define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) #define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) -#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) +#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4) #define FE_STRICT_RFC2819_MODE_MASK BIT(31) -#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) -#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) +#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17) +#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16) #define FE_TX_MIB_ID_MASK GENMASK(15, 8) #define FE_RX_MIB_ID_MASK GENMASK(7, 0) @@ -214,6 +214,33 @@ #define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) #define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) +#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c) +#define GDM_SPORT_OFF2_MASK GENMASK(19, 16) +#define GDM_SPORT_OFF1_MASK GENMASK(15, 12) +#define GDM_SPORT_OFF0_MASK GENMASK(11, 8) + +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) + +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) +#define 
REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) + #define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200) #define PPE_GLO_CFG_BUSY_MASK BIT(31) #define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9) @@ -326,44 +353,6 @@ #define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374) -#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) -#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) -#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) -#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) - -#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) -#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) -#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) -#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) -#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) -#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) -#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) -#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) -#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) -#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) -#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) -#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) -#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) -#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) -#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) -#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) - -#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) -#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) -#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) - -#define REG_GDM3_FWD_CFG GDM3_BASE -#define GDM3_PAD_EN_MASK BIT(28) - -#define REG_GDM4_FWD_CFG GDM4_BASE -#define GDM4_PAD_EN_MASK BIT(28) -#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) - -#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c) -#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) -#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) -#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) - #define REG_IP_FRAG_FP 0x2010 #define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) #define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) @@ -383,10 +372,8 @@ #define REG_MC_VLAN_DATA 0x2108 #define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2)) -#define SP_CPORT_PCIE1_MASK GENMASK(31, 28) -#define SP_CPORT_PCIE0_MASK GENMASK(27, 24) -#define SP_CPORT_USB_MASK GENMASK(7, 4) -#define SP_CPORT_ETH_MASK GENMASK(7, 4) +#define SP_CPORT_DFT_MASK GENMASK(2, 0) +#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2)) #define REG_SRC_PORT_FC_MAP6 0x2298 #define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24) diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 82f2363a45cd..e5a56bb989da 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -401,9 +401,6 @@ struct altera_tse_private { /* MAC address space */ struct altera_tse_mac __iomem *mac_dev; - /* TSE Revision */ - u32 revision; - /* mSGDMA 
Rx Dispatcher address space */ void __iomem *rx_dma_csr; void __iomem *rx_dma_desc; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3f6204de9e6b..ca55c5fd11df 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -892,9 +892,6 @@ static int tse_open(struct net_device *dev) netdev_warn(dev, "device MAC address %pM\n", dev->dev_addr); - if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) - netdev_warn(dev, "TSE revision %x\n", priv->revision); - spin_lock(&priv->mac_cfg_lock); ret = reset_mac(priv); @@ -1142,6 +1139,7 @@ static int altera_tse_probe(struct platform_device *pdev) struct net_device *ndev; void __iomem *descmap; int ret = -ENODEV; + u32 revision; ndev = alloc_etherdev(sizeof(struct altera_tse_private)); if (!ndev) { @@ -1150,6 +1148,7 @@ static int altera_tse_probe(struct platform_device *pdev) } SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); priv->device = &pdev->dev; @@ -1387,25 +1386,7 @@ static int altera_tse_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); spin_lock_init(&priv->rxdma_irq_lock); - netif_carrier_off(ndev); - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "failed to register TSE net device\n"); - goto err_register_netdev; - } - - platform_set_drvdata(pdev, ndev); - - priv->revision = ioread32(&priv->mac_dev->megacore_revision); - - if (netif_msg_probe(priv)) - dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", - (priv->revision >> 8) & 0xff, - priv->revision & 0xff, - (unsigned long) control_port->start, priv->rx_irq, - priv->tx_irq); - - snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name); + snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(&pdev->dev)); pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc); if (IS_ERR(pcs_bus)) { ret = PTR_ERR(pcs_bus); @@ -1442,12 +1423,30 @@ static int altera_tse_probe(struct platform_device *pdev) goto err_init_phylink; } + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register TSE net device\n"); + goto err_register_netdev; + } + + revision = ioread32(&priv->mac_dev->megacore_revision); + + if (revision < 0xd00 || revision > 0xe00) + netdev_warn(ndev, "TSE revision %x\n", revision); + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", + (revision >> 8) & 0xff, revision & 0xff, + (unsigned long)control_port->start, priv->rx_irq, + priv->tx_irq); + return 0; + +err_register_netdev: + phylink_destroy(priv->phylink); err_init_phylink: lynx_pcs_destroy(priv->pcs); err_init_pcs: - unregister_netdev(ndev); -err_register_netdev: netif_napi_del(&priv->napi); altera_tse_mdio_destroy(ndev); err_free_netdev: diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index b39c6f3e1eda..d54dca3074eb 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -165,6 +165,7 @@ config AMD_XGBE select CRC32 select PHYLIB select AMD_XGBE_HAVE_ECC if X86 + select NET_SELFTESTS help This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. 
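Note on the NET_SELFTESTS dependency added above: the generic loopback-selftest helpers it selects live in net/core/selftests.c, and the new xgbe-selftest.c later in this patch is built on them. The flow those helpers support is: fill a struct net_packet_attrs, get a prebuilt Ethernet/IP/UDP test frame from net_test_get_skb(), push it out with dev_direct_xmit(), and let a packet_type hook validate the looped-back copy and signal a completion. A minimal sketch of that flow, under the caveat that the demo_* names are illustrative only while the types and helpers (struct net_test_priv, struct net_packet_attrs, net_test_get_skb(), NET_LB_TIMEOUT) are the ones the new file itself uses:

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/selftests.h>

/* Toy rx hook: a real one parses and checks the frame the way
 * xgbe_test_loopback_validate() below does before declaring success. */
static int demo_validate(struct sk_buff *skb, struct net_device *ndev,
			 struct packet_type *pt, struct net_device *orig_ndev)
{
	struct net_test_priv *tdata = pt->af_packet_priv;

	tdata->ok = true;
	complete(&tdata->comp);
	kfree_skb(skb);
	return 0;
}

static int demo_loopback_one_frame(struct net_device *ndev)
{
	struct net_packet_attrs attr = { .dst = ndev->dev_addr };
	struct net_test_priv *tdata;
	struct sk_buff *skb;
	int ret;

	tdata = kzalloc(sizeof(*tdata), GFP_KERNEL);
	if (!tdata)
		return -ENOMEM;

	init_completion(&tdata->comp);
	tdata->pt.type = htons(ETH_P_IP);	/* only looped-back IP frames */
	tdata->pt.func = demo_validate;
	tdata->pt.dev = ndev;
	tdata->pt.af_packet_priv = tdata;
	tdata->packet = &attr;
	dev_add_pack(&tdata->pt);

	/* Prebuilt Eth/IP/UDP test frame carrying the magic and sequence id */
	skb = net_test_get_skb(ndev, 0, &attr);
	ret = skb ? dev_direct_xmit(skb, attr.queue_mapping) : -ENOMEM;
	if (!ret) {
		wait_for_completion_timeout(&tdata->comp, NET_LB_TIMEOUT);
		ret = tdata->ok ? 0 : -ETIMEDOUT;
	}

	dev_remove_pack(&tdata->pt);
	kfree(tdata);
	return ret;
}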
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile index 980e27652237..5992f7fd4d9b 100644 --- a/drivers/net/ethernet/amd/xgbe/Makefile +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -5,7 +5,7 @@ amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \ xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \ - xgbe-platform.o + xgbe-platform.o xgbe-selftest.o amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index e5391a2eca51..b646ae575e6a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -211,6 +211,7 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) } XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); + pdata->sph = true; } static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) @@ -223,6 +224,7 @@ static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); } + pdata->sph = false; } static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, @@ -3578,3 +3580,20 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) DBGPR("<--xgbe_init_function_ptrs\n"); } + +int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata) +{ + /* Enable MAC loopback mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1); + + /* Wait for loopback to stabilize */ + usleep_range(10, 15); + + return 0; +} + +void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata) +{ + /* Disable MAC loopback mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 4dc631af7933..f3adf29b222b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1754,27 +1754,6 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) return 0; } -static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) -{ - struct xgbe_prv_data *pdata = netdev_priv(netdev); - int ret; - - switch (cmd) { - case SIOCGHWTSTAMP: - ret = xgbe_get_hwtstamp_settings(pdata, ifreq); - break; - - case SIOCSHWTSTAMP: - ret = xgbe_set_hwtstamp_settings(pdata, ifreq); - break; - - default: - ret = -EOPNOTSUPP; - } - - return ret; -} - static int xgbe_change_mtu(struct net_device *netdev, int mtu) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -2020,7 +1999,6 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_set_rx_mode = xgbe_set_rx_mode, .ndo_set_mac_address = xgbe_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = xgbe_ioctl, .ndo_change_mtu = xgbe_change_mtu, .ndo_tx_timeout = xgbe_tx_timeout, .ndo_get_stats64 = xgbe_get_stats64, @@ -2033,6 +2011,8 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_fix_features = xgbe_fix_features, .ndo_set_features = xgbe_set_features, .ndo_features_check = xgbe_features_check, + .ndo_hwtstamp_get = xgbe_get_hwtstamp_settings, + .ndo_hwtstamp_set = xgbe_set_hwtstamp_settings, }; const struct net_device_ops *xgbe_get_netdev_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index b6e1b67a2d0e..0d19b09497a0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -85,6 +85,9 
@@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) int i; switch (stringset) { + case ETH_SS_TEST: + xgbe_selftest_get_strings(pdata, data); + break; case ETH_SS_STATS: for (i = 0; i < XGBE_STATS_COUNT; i++) ethtool_puts(&data, xgbe_gstring_stats[i].stat_string); @@ -131,6 +134,9 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset) int ret; switch (stringset) { + case ETH_SS_TEST: + ret = xgbe_selftest_get_count(pdata); + break; case ETH_SS_STATS: ret = XGBE_STATS_COUNT + (pdata->tx_ring_count * 2) + @@ -760,6 +766,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .set_ringparam = xgbe_set_ringparam, .get_channels = xgbe_get_channels, .set_channels = xgbe_set_channels, + .self_test = xgbe_selftest_run, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c index bc52e5ec6420..0127988e10be 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c @@ -157,26 +157,24 @@ unlock: spin_unlock_irqrestore(&pdata->tstamp_lock, flags); } -int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) +int xgbe_get_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, - sizeof(pdata->tstamp_config))) - return -EFAULT; + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + *config = pdata->tstamp_config; return 0; } -int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) +int xgbe_set_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; - unsigned int mac_tscr; - - if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) - return -EFAULT; - - mac_tscr = 0; + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int mac_tscr = 0; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: break; @@ -188,7 +186,7 @@ int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; @@ -290,7 +288,7 @@ int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) xgbe_config_tstamp(pdata, mac_tscr); - memcpy(&pdata->tstamp_config, &config, sizeof(config)); + pdata->tstamp_config = *config; return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index a56efc1bee33..35a381a83647 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -668,7 +668,7 @@ static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad, else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg); else - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; xgbe_phy_put_comm_ownership(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c new file mode 100644 index 000000000000..55e5e467facd --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) +/* + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. 
+ * All rights reserved + * + * Author: Raju Rangoju <Raju.Rangoju@amd.com> + */ +#include <linux/crc32.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <net/tcp.h> +#include <net/udp.h> +#include <net/checksum.h> +#include <net/selftests.h> + +#include "xgbe.h" +#include "xgbe-common.h" + +#define XGBE_LOOPBACK_NONE 0 +#define XGBE_LOOPBACK_MAC 1 +#define XGBE_LOOPBACK_PHY 2 + +struct xgbe_test { + char name[ETH_GSTRING_LEN]; + int lb; + int (*fn)(struct xgbe_prv_data *pdata); +}; + +static u8 xgbe_test_id; + +static int xgbe_test_loopback_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct net_test_priv *tdata = pt->af_packet_priv; + const unsigned char *dst = tdata->packet->dst; + const unsigned char *src = tdata->packet->src; + struct netsfhdr *hdr; + struct ethhdr *eh; + struct tcphdr *th; + struct udphdr *uh; + struct iphdr *ih; + int eat; + + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + goto out; + + eat = (skb->tail + skb->data_len) - skb->end; + if (eat > 0 && skb_shared(skb)) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; + } + + if (skb_linearize(skb)) + goto out; + + if (skb_headlen(skb) < (NET_TEST_PKT_SIZE - ETH_HLEN)) + goto out; + + eh = (struct ethhdr *)skb_mac_header(skb); + if (dst) { + if (!ether_addr_equal_unaligned(eh->h_dest, dst)) + goto out; + } + if (src) { + if (!ether_addr_equal_unaligned(eh->h_source, src)) + goto out; + } + + ih = ip_hdr(skb); + + if (tdata->packet->tcp) { + if (ih->protocol != IPPROTO_TCP) + goto out; + + th = (struct tcphdr *)((u8 *)ih + 4 * ih->ihl); + if (th->dest != htons(tdata->packet->dport)) + goto out; + + hdr = (struct netsfhdr *)((u8 *)th + sizeof(*th)); + } else { + if (ih->protocol != IPPROTO_UDP) + goto out; + + uh = (struct udphdr *)((u8 *)ih + 4 * ih->ihl); + if (uh->dest != htons(tdata->packet->dport)) + goto out; + + hdr = (struct netsfhdr *)((u8 *)uh + sizeof(*uh)); + } + + if (hdr->magic != cpu_to_be64(NET_TEST_PKT_MAGIC)) + goto out; + if (tdata->packet->id != hdr->id) + goto out; + + tdata->ok = true; + complete(&tdata->comp); +out: + kfree_skb(skb); + return 0; +} + +static int __xgbe_test_loopback(struct xgbe_prv_data *pdata, + struct net_packet_attrs *attr) +{ + struct net_test_priv *tdata; + struct sk_buff *skb = NULL; + int ret = 0; + + tdata = kzalloc(sizeof(*tdata), GFP_KERNEL); + if (!tdata) + return -ENOMEM; + + tdata->ok = false; + init_completion(&tdata->comp); + + tdata->pt.type = htons(ETH_P_IP); + tdata->pt.func = xgbe_test_loopback_validate; + tdata->pt.dev = pdata->netdev; + tdata->pt.af_packet_priv = tdata; + tdata->packet = attr; + + dev_add_pack(&tdata->pt); + + skb = net_test_get_skb(pdata->netdev, xgbe_test_id, attr); + if (!skb) { + ret = -ENOMEM; + goto cleanup; + } + + xgbe_test_id++; + ret = dev_direct_xmit(skb, attr->queue_mapping); + if (ret) + goto cleanup; + + if (!attr->timeout) + attr->timeout = NET_LB_TIMEOUT; + + wait_for_completion_timeout(&tdata->comp, attr->timeout); + ret = tdata->ok ? 
0 : -ETIMEDOUT; + + if (ret) + netdev_err(pdata->netdev, "Response timed out: ret %d\n", ret); cleanup: + dev_remove_pack(&tdata->pt); + kfree(tdata); + return ret; +} + +static int xgbe_test_mac_loopback(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + + attr.dst = pdata->netdev->dev_addr; + return __xgbe_test_loopback(pdata, &attr); +} + +static int xgbe_test_phy_loopback(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + int ret; + + if (!pdata->netdev->phydev) { + netdev_err(pdata->netdev, "phydev not found: cannot start PHY loopback test\n"); + return -EOPNOTSUPP; + } + + ret = phy_loopback(pdata->netdev->phydev, true, 0); + if (ret) + return ret; + + attr.dst = pdata->netdev->dev_addr; + ret = __xgbe_test_loopback(pdata, &attr); + + phy_loopback(pdata->netdev->phydev, false, 0); + return ret; +} + +static int xgbe_test_sph(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + unsigned long cnt_end, cnt_start; + int ret; + + cnt_start = pdata->ext_stats.rx_split_header_packets; + + if (!pdata->sph) { + netdev_err(pdata->netdev, "Split Header not enabled\n"); + return -EOPNOTSUPP; + } + + /* UDP test */ + attr.dst = pdata->netdev->dev_addr; + attr.tcp = false; + + ret = __xgbe_test_loopback(pdata, &attr); + if (ret) + return ret; + + cnt_end = pdata->ext_stats.rx_split_header_packets; + if (cnt_end <= cnt_start) + return -EINVAL; + + /* TCP test */ + cnt_start = cnt_end; + + attr.dst = pdata->netdev->dev_addr; + attr.tcp = true; + + ret = __xgbe_test_loopback(pdata, &attr); + if (ret) + return ret; + + cnt_end = pdata->ext_stats.rx_split_header_packets; + if (cnt_end <= cnt_start) + return -EINVAL; + + return 0; +} + +static int xgbe_test_jumbo(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + int size = pdata->rx_buf_size; + + attr.dst = pdata->netdev->dev_addr; + attr.max_size = size - ETH_FCS_LEN; + + return __xgbe_test_loopback(pdata, &attr); +} + +static const struct xgbe_test xgbe_selftests[] = { + { + .name = "MAC Loopback ", + .lb = XGBE_LOOPBACK_MAC, + .fn = xgbe_test_mac_loopback, + }, { + .name = "PHY Loopback ", + .lb = XGBE_LOOPBACK_NONE, + .fn = xgbe_test_phy_loopback, + }, { + .name = "Split Header ", + .lb = XGBE_LOOPBACK_PHY, + .fn = xgbe_test_sph, + }, { + .name = "Jumbo Frame ", + .lb = XGBE_LOOPBACK_PHY, + .fn = xgbe_test_jumbo, + }, +}; + +void xgbe_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct xgbe_prv_data *pdata = netdev_priv(dev); + int count = xgbe_selftest_get_count(pdata); + int i, ret; + + memset(buf, 0, sizeof(*buf) * count); + xgbe_test_id = 0; + + if (etest->flags != ETH_TEST_FL_OFFLINE) { + netdev_err(pdata->netdev, "Only offline tests are supported\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } else if (!netif_carrier_ok(dev)) { + netdev_err(pdata->netdev, + "Invalid link, cannot execute tests\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* Wait for queues to drain */ + msleep(200); + + for (i = 0; i < count; i++) { + ret = 0; + + switch (xgbe_selftests[i].lb) { + case XGBE_LOOPBACK_PHY: + ret = -EOPNOTSUPP; + if (dev->phydev) + ret = phy_loopback(dev->phydev, true, 0); + if (!ret) + break; + fallthrough; + case XGBE_LOOPBACK_MAC: + ret = xgbe_enable_mac_loopback(pdata); + break; + case XGBE_LOOPBACK_NONE: + break; + default: + ret = -EOPNOTSUPP; + break; + } + + /* + * First tests will always be MAC / PHY loopback. + * If any of them is not supported we abort early. 
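+ * (The XGBE_LOOPBACK_PHY entries fall back to MAC loopback via the + * fallthrough above when phy_loopback() is unavailable.) 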
+ */ + if (ret) { + netdev_err(pdata->netdev, "Loopback not supported\n"); + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + + ret = xgbe_selftests[i].fn(pdata); + if (ret && (ret != -EOPNOTSUPP)) + etest->flags |= ETH_TEST_FL_FAILED; + buf[i] = ret; + + switch (xgbe_selftests[i].lb) { + case XGBE_LOOPBACK_PHY: + ret = -EOPNOTSUPP; + if (dev->phydev) + ret = phy_loopback(dev->phydev, false, 0); + if (!ret) + break; + fallthrough; + case XGBE_LOOPBACK_MAC: + xgbe_disable_mac_loopback(pdata); + break; + default: + break; + } + } +} + +void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data) +{ + u8 *p = data; + int i; + + for (i = 0; i < xgbe_selftest_get_count(pdata); i++) + ethtool_puts(&p, xgbe_selftests[i].name); +} + +int xgbe_selftest_get_count(struct xgbe_prv_data *pdata) +{ + return ARRAY_SIZE(xgbe_selftests); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e8bbb6805901..03ef0f548483 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1146,7 +1146,7 @@ struct xgbe_prv_data { spinlock_t tstamp_lock; struct ptp_clock_info ptp_clock_info; struct ptp_clock *ptp_clock; - struct hwtstamp_config tstamp_config; + struct kernel_hwtstamp_config tstamp_config; unsigned int tstamp_addend; struct work_struct tx_tstamp_work; struct sk_buff *tx_tstamp_skb; @@ -1246,6 +1246,7 @@ struct xgbe_prv_data { int rx_adapt_retries; bool rx_adapt_done; bool mode_set; + bool sph; }; /* Function prototypes*/ @@ -1307,10 +1308,11 @@ void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, unsigned int nsec); void xgbe_tx_tstamp(struct work_struct *work); -int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, - struct ifreq *ifreq); -int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, - struct ifreq *ifreq); +int xgbe_get_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int xgbe_set_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, struct sk_buff *skb, struct xgbe_packet_data *packet); @@ -1321,6 +1323,16 @@ void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg, int index, bool on); +/* Selftest functions */ +void xgbe_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf); +void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data); +int xgbe_selftest_get_count(struct xgbe_prv_data *pdata); + +/* Loopback control */ +int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata); +void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata); + #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); void xgbe_debugfs_exit(struct xgbe_prv_data *); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index b565189e5913..4ef4fe64b8ac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -258,10 +258,15 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) (void)aq_nic_set_multicast_list(aq_nic, ndev); } -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) -static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, - struct hwtstamp_config *config) +static int aq_ndev_hwtstamp_set(struct 
net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { + struct aq_nic_s *aq_nic = netdev_priv(netdev); + + if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp) + return -EOPNOTSUPP; + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: @@ -290,59 +295,17 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config); } -#endif - -static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr) -{ - struct hwtstamp_config config; -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - int ret_val; -#endif - - if (!aq_nic->aq_ptp) - return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - ret_val = aq_ndev_config_hwtstamp(aq_nic, &config); - if (ret_val) - return ret_val; -#endif - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) -static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr) +static int aq_ndev_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; + struct aq_nic_s *aq_nic = netdev_priv(netdev); if (!aq_nic->aq_ptp) return -EOPNOTSUPP; - aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config); - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} -#endif - -static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct aq_nic_s *aq_nic = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return aq_ndev_hwtstamp_set(aq_nic, ifr); - -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - case SIOCGHWTSTAMP: - return aq_ndev_hwtstamp_get(aq_nic, ifr); -#endif - } - - return -EOPNOTSUPP; + aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config); + return 0; } static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, @@ -500,12 +463,13 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, .ndo_fix_features = aq_ndev_fix_features, - .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, .ndo_bpf = aq_xdp, .ndo_xdp_xmit = aq_xdp_xmit, + .ndo_hwtstamp_get = aq_ndev_hwtstamp_get, + .ndo_hwtstamp_set = aq_ndev_hwtstamp_set, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 5acb3e16b567..0fa0f891c0e0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -51,7 +51,7 @@ struct ptp_tx_timeout { struct aq_ptp_s { struct aq_nic_s *aq_nic; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; spinlock_t ptp_lock; spinlock_t ptp_ring_lock; struct ptp_clock *ptp_clock; @@ -567,7 +567,7 @@ static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtsta } void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { *config = aq_ptp->hwtstamp_config; } @@ -588,7 +588,7 @@ static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp) } int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { struct aq_nic_s *aq_nic 
= aq_ptp->aq_nic; const struct aq_hw_ops *hw_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h index 210b723f2207..5e643ec7cc06 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h @@ -60,9 +60,9 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp); /* Must be to check available of PTP before call */ void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); /* Return either ring is belong to PTP or not*/ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring); @@ -130,9 +130,9 @@ static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb) static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {} static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) {} + struct kernel_hwtstamp_config *config) {} static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { return 0; } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 9fdef874f5ca..666522d64775 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -25,6 +25,7 @@ config B44 select SSB select MII select PHYLIB + select FIXED_PHY if BCM47XX help If you have a network (Ethernet) controller of this type, say Y or M here. diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index 63f1a8c3a7fb..dd80ccfca19d 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -163,11 +163,30 @@ static void bcmasp_set_msglevel(struct net_device *dev, u32 level) static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_priv *priv = intf->parent; + struct device *kdev = &priv->pdev->dev; + u32 phy_wolopts = 0; + + if (dev->phydev) { + phy_ethtool_get_wol(dev->phydev, wol); + phy_wolopts = wol->wolopts; + } + + /* MAC is not wake-up capable, return what the PHY does */ + if (!device_can_wakeup(kdev)) + return; + + /* Overlay MAC capabilities with that of the PHY queried before */ + wol->supported |= BCMASP_SUPPORTED_WAKE; + wol->wolopts |= intf->wolopts; + + /* Return the PHY configured magic password */ + if (phy_wolopts & WAKE_MAGICSECURE) + return; - wol->supported = BCMASP_SUPPORTED_WAKE; - wol->wolopts = intf->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass)); + /* Otherwise the MAC one */ if (wol->wolopts & WAKE_MAGICSECURE) memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass)); } @@ -177,10 +196,21 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct bcmasp_intf *intf = netdev_priv(dev); struct bcmasp_priv *priv = intf->parent; struct device *kdev = &priv->pdev->dev; + int ret = 0; + + /* Try Wake-on-LAN from the PHY first */ + if (dev->phydev) { + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (ret != -EOPNOTSUPP && wol->wolopts) + return ret; + } if (!device_can_wakeup(kdev)) return -EOPNOTSUPP; + if (wol->wolopts & ~BCMASP_SUPPORTED_WAKE) + return -EINVAL; + /* Interface Specific 
*/ intf->wolopts = wol->wolopts; if (intf->wolopts & WAKE_MAGICSECURE) diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 0353359c3fe9..888f28f11406 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -31,6 +31,7 @@ #include <linux/ssb/ssb.h> #include <linux/slab.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -2233,7 +2234,6 @@ static int b44_register_phy_one(struct b44 *bp) struct mii_bus *mii_bus; struct ssb_device *sdev = bp->sdev; struct phy_device *phydev; - char bus_id[MII_BUS_ID_SIZE + 3]; struct ssb_sprom *sprom = &sdev->bus->sprom; int err; @@ -2260,27 +2260,26 @@ static int b44_register_phy_one(struct b44 *bp) goto err_out_mdiobus; } - if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && - (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { - + phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr); + if (!phydev && + sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) { dev_info(sdev->dev, "could not find PHY at %i, use fixed one\n", bp->phy_addr); - bp->phy_addr = 0; - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0", - bp->phy_addr); - } else { - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, - bp->phy_addr); + phydev = fixed_phy_register_100fd(); + if (!IS_ERR(phydev)) + bp->phy_addr = phydev->mdio.addr; } - phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(phydev)) { + if (IS_ERR_OR_NULL(phydev)) + err = -ENODEV; + else + err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { dev_err(sdev->dev, "could not attach PHY at %i\n", bp->phy_addr); - err = PTR_ERR(phydev); goto err_out_mdiobus_unregister; } @@ -2293,7 +2292,6 @@ static int b44_register_phy_one(struct b44 *bp) linkmode_copy(phydev->advertising, phydev->supported); bp->old_link = 0; - bp->phy_addr = phydev->mdio.addr; phy_attached_info(phydev); @@ -2311,10 +2309,15 @@ err_out: static void b44_unregister_phy_one(struct b44 *bp) { - struct net_device *dev = bp->dev; struct mii_bus *mii_bus = bp->mii_bus; + struct net_device *dev = bp->dev; + struct phy_device *phydev; + + phydev = dev->phydev; - phy_disconnect(dev->phydev); + phy_disconnect(phydev); + if (phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 0abaa2bbe357..a8a74f07bb54 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -952,7 +952,6 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) snprintf(ptp_info->pin_config[i].name, sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i); ptp_info->pin_config[i].index = i; - ptp_info->pin_config[i].chan = i; if (*pin_usg == BNXT_PPS_PIN_PPS_IN) ptp_info->pin_config[i].func = PTP_PF_EXTTS; else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT) @@ -969,6 +968,8 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) ptp_info->n_per_out = 1; ptp_info->pps = 1; ptp_info->verify = bnxt_ptp_verify; + ptp_info->supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS; + ptp_info->supported_perout_flags = PTP_PEROUT_DUTY_CYCLE; return 0; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 98971ae4f87d..d99ef92feb82 100644 --- 
a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -35,7 +35,6 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/phy.h> -#include <linux/platform_data/bcmgenet.h> #include <linux/unaligned.h> @@ -3926,7 +3925,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match); static int bcmgenet_probe(struct platform_device *pdev) { - struct bcmgenet_platform_data *pd = pdev->dev.platform_data; const struct bcmgenet_plat_data *pdata; struct bcmgenet_priv *priv; struct net_device *dev; @@ -4010,9 +4008,6 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->version = pdata->version; priv->dma_max_burst_length = pdata->dma_max_burst_length; priv->flags = pdata->flags; - } else { - priv->version = pd->genet_version; - priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; } priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); @@ -4062,16 +4057,13 @@ static int bcmgenet_probe(struct platform_device *pdev) if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - if (pd && !IS_ERR_OR_NULL(pd->mac_address)) - eth_hw_addr_set(dev, pd->mac_address); - else - if (device_get_ethdev_address(&pdev->dev, dev)) - if (has_acpi_companion(&pdev->dev)) { - u8 addr[ETH_ALEN]; + if (device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; - bcmgenet_get_hw_addr(priv, addr); - eth_hw_addr_set(dev, addr); - } + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } if (!is_valid_ether_addr(dev->dev_addr)) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 573e8b279e52..38f854b94a79 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -20,7 +20,6 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/platform_data/bcmgenet.h> #include <linux/platform_data/mdio-bcm-unimac.h> #include "bcmgenet.h" @@ -436,23 +435,6 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) return priv->mdio_dn; } -static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, - struct unimac_mdio_pdata *ppd) -{ - struct device *kdev = &priv->pdev->dev; - struct bcmgenet_platform_data *pd = kdev->platform_data; - - if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { - /* - * Internal or external PHY with MDIO access - */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - ppd->phy_mask = 1 << pd->phy_address; - else - ppd->phy_mask = 0; - } -} - static int bcmgenet_mii_wait(void *wait_func_data) { struct bcmgenet_priv *priv = wait_func_data; @@ -467,7 +449,6 @@ static int bcmgenet_mii_wait(void *wait_func_data) static int bcmgenet_mii_register(struct bcmgenet_priv *priv) { struct platform_device *pdev = priv->pdev; - struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; struct device_node *dn = pdev->dev.of_node; struct unimac_mdio_pdata ppd; struct platform_device *ppdev; @@ -511,8 +492,6 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv) ppdev->dev.parent = &pdev->dev; if (dn) ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); - else if (pdata) - bcmgenet_mii_pdata_init(priv, &ppd); else ppd.phy_mask = ~0; @@ -594,58 +573,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) return 0; } -static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) -{ 
- struct device *kdev = &priv->pdev->dev; - struct bcmgenet_platform_data *pd = kdev->platform_data; - char phy_name[MII_BUS_ID_SIZE + 3]; - char mdio_bus_id[MII_BUS_ID_SIZE]; - struct phy_device *phydev; - - snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", - UNIMAC_MDIO_DRV_NAME, priv->pdev->id); - - if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { - snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, - mdio_bus_id, pd->phy_address); - - /* - * Internal or external PHY with MDIO access - */ - phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); - if (IS_ERR(phydev)) { - dev_err(kdev, "failed to register PHY device\n"); - return PTR_ERR(phydev); - } - } else { - /* - * MoCA port or no MDIO access. - * Use fixed PHY to represent the link layer. - */ - struct fixed_phy_status fphy_status = { - .link = 1, - .speed = pd->phy_speed, - .duplex = pd->phy_duplex, - .pause = 0, - .asym_pause = 0, - }; - - phydev = fixed_phy_register(&fphy_status, NULL); - if (IS_ERR(phydev)) { - dev_err(kdev, "failed to register fixed PHY device\n"); - return PTR_ERR(phydev); - } - - /* Make sure we initialize MoCA PHYs with a link down */ - phydev->link = 0; - - } - - priv->phy_interface = pd->phy_interface; - - return 0; -} - static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -656,7 +583,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) else if (has_acpi_companion(kdev)) return bcmgenet_phy_interface_init(priv); else - return bcmgenet_mii_pd_init(priv); + return -EINVAL; } int bcmgenet_mii_init(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d78cafdb2094..e21f7c6a6de7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12719,29 +12719,17 @@ static int tg3_get_sset_count(struct net_device *dev, int sset) } } -static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) +static u32 tg3_get_rx_ring_count(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); if (!tg3_flag(tp, SUPPORT_MSIX)) - return -EOPNOTSUPP; + return 1; - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - if (netif_running(tp->dev)) - info->data = tp->rxq_cnt; - else { - info->data = num_online_cpus(); - if (info->data > TG3_RSS_MAX_NUM_QS) - info->data = TG3_RSS_MAX_NUM_QS; - } + if (netif_running(tp->dev)) + return tp->rxq_cnt; - return 0; - - default: - return -EOPNOTSUPP; - } + return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max); } static u32 tg3_get_rxfh_indir_size(struct net_device *dev) @@ -14268,7 +14256,7 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, .get_sset_count = tg3_get_sset_count, - .get_rxnfc = tg3_get_rxnfc, + .get_rx_ring_count = tg3_get_rx_ring_count, .get_rxfh_indir_size = tg3_get_rxfh_indir_size, .get_rxfh = tg3_get_rxfh, .set_rxfh = tg3_set_rxfh, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 0830c48973aa..87414a2ddf6e 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -15,10 +15,6 @@ #include <linux/phy/phy.h> #include <linux/workqueue.h> -#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP) -#define MACB_EXT_DESC -#endif - #define MACB_GREGS_NBR 16 #define MACB_GREGS_VERSION 2 #define MACB_MAX_QUEUES 8 @@ -541,6 +537,8 @@ /* Bitfields in DCFG6. 
*/ #define GEM_PBUF_LSO_OFFSET 27 #define GEM_PBUF_LSO_SIZE 1 +#define GEM_PBUF_RSC_OFFSET 26 +#define GEM_PBUF_RSC_SIZE 1 #define GEM_PBUF_CUTTHRU_OFFSET 25 #define GEM_PBUF_CUTTHRU_SIZE 1 #define GEM_DAW64_OFFSET 23 @@ -756,27 +754,31 @@ #define MACB_MAN_C45_CODE 2 /* Capability mask bits */ -#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 -#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 -#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 -#define MACB_CAPS_USRIO_DISABLED 0x00000010 -#define MACB_CAPS_JUMBO 0x00000020 -#define MACB_CAPS_GEM_HAS_PTP 0x00000040 -#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 -#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 -#define MACB_CAPS_MIIONRGMII 0x00000200 -#define MACB_CAPS_NEED_TSUCLK 0x00000400 -#define MACB_CAPS_QUEUE_DISABLE 0x00000800 -#define MACB_CAPS_QBV 0x00001000 -#define MACB_CAPS_PCS 0x01000000 -#define MACB_CAPS_HIGH_SPEED 0x02000000 -#define MACB_CAPS_CLK_HW_CHG 0x04000000 -#define MACB_CAPS_MACB_IS_EMAC 0x08000000 -#define MACB_CAPS_FIFO_MODE 0x10000000 -#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 -#define MACB_CAPS_SG_DISABLED 0x40000000 -#define MACB_CAPS_MACB_IS_GEM 0x80000000 +#define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0) +#define MACB_CAPS_USRIO_HAS_CLKEN BIT(1) +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2) +#define MACB_CAPS_NO_GIGABIT_HALF BIT(3) +#define MACB_CAPS_USRIO_DISABLED BIT(4) +#define MACB_CAPS_JUMBO BIT(5) +#define MACB_CAPS_GEM_HAS_PTP BIT(6) +#define MACB_CAPS_BD_RD_PREFETCH BIT(7) +#define MACB_CAPS_NEEDS_RSTONUBR BIT(8) +#define MACB_CAPS_MIIONRGMII BIT(9) +#define MACB_CAPS_NEED_TSUCLK BIT(10) +#define MACB_CAPS_QUEUE_DISABLE BIT(11) +#define MACB_CAPS_QBV BIT(12) +#define MACB_CAPS_PCS BIT(13) +#define MACB_CAPS_HIGH_SPEED BIT(14) +#define MACB_CAPS_CLK_HW_CHG BIT(15) +#define MACB_CAPS_MACB_IS_EMAC BIT(16) +#define MACB_CAPS_FIFO_MODE BIT(17) +#define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18) +#define MACB_CAPS_SG_DISABLED BIT(19) +#define MACB_CAPS_MACB_IS_GEM BIT(20) +#define MACB_CAPS_DMA_64B BIT(21) +#define MACB_CAPS_DMA_PTP BIT(22) +#define MACB_CAPS_RSC BIT(23) +#define MACB_CAPS_NO_LSO BIT(24) /* LSO settings */ #define MACB_LSO_UFO_ENABLE 0x01 @@ -853,12 +855,6 @@ struct macb_dma_desc { u32 ctrl; }; -#ifdef MACB_EXT_DESC -#define HW_DMA_CAP_32B 0 -#define HW_DMA_CAP_64B (1 << 0) -#define HW_DMA_CAP_PTP (1 << 1) -#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP) - struct macb_dma_desc_64 { u32 addrh; u32 resvd; @@ -868,7 +864,6 @@ struct macb_dma_desc_ptp { u32 ts_1; u32 ts_2; }; -#endif /* DMA descriptor bitfields */ #define MACB_RX_USED_OFFSET 0 @@ -1299,7 +1294,6 @@ struct macb { unsigned int tx_ring_size; unsigned int num_queues; - unsigned int queue_mask; struct macb_queue queues[MACB_MAX_QUEUES]; spinlock_t lock; @@ -1347,11 +1341,8 @@ struct macb { struct macb_ptp_info *ptp_info; /* macb-ptp interface */ - struct phy *sgmii_phy; /* for ZynqMP SGMII mode */ + struct phy *phy; -#ifdef MACB_EXT_DESC - uint8_t hw_dma_cap; -#endif spinlock_t tsu_clk_lock; /* gem tsu clock locking */ unsigned int tsu_rate; struct ptp_clock *ptp_clock; @@ -1443,6 +1434,18 @@ static inline u64 enst_max_hw_interval(u32 speed_mbps) ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps)); } +static inline bool macb_dma64(struct macb *bp) +{ + return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + bp->caps & MACB_CAPS_DMA_64B; +} + +static inline bool macb_dma_ptp(struct macb *bp) +{ + return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && + bp->caps & MACB_CAPS_DMA_PTP; +} + /** 
* struct macb_platform_data - platform data for MACB Ethernet used for PCI registration * @pclk: platform clock diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ca2386b83473..e461f5072884 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -6,36 +6,36 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/clk.h> +#include <linux/circ_buf.h> #include <linux/clk-provider.h> +#include <linux/clk.h> #include <linux/crc32.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/circ_buf.h> -#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/io.h> #include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/ip.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> -#include <linux/phylink.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/ip.h> -#include <linux/udp.h> -#include <linux/tcp.h> -#include <linux/iopoll.h> #include <linux/phy/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/ptp_classify.h> #include <linux/reset.h> -#include <linux/firmware/xlnx-zynqmp.h> -#include <linux/inetdevice.h> +#include <linux/slab.h> +#include <linux/tcp.h> +#include <linux/types.h> +#include <linux/udp.h> #include <net/pkt_sched.h> #include "macb.h" @@ -121,56 +121,26 @@ struct sifive_fu540_macb_mgmt { */ static unsigned int macb_dma_desc_get_size(struct macb *bp) { -#ifdef MACB_EXT_DESC - unsigned int desc_size; + unsigned int desc_size = sizeof(struct macb_dma_desc); + + if (macb_dma64(bp)) + desc_size += sizeof(struct macb_dma_desc_64); + if (macb_dma_ptp(bp)) + desc_size += sizeof(struct macb_dma_desc_ptp); - switch (bp->hw_dma_cap) { - case HW_DMA_CAP_64B: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_64); - break; - case HW_DMA_CAP_PTP: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_ptp); - break; - case HW_DMA_CAP_64B_PTP: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_64) - + sizeof(struct macb_dma_desc_ptp); - break; - default: - desc_size = sizeof(struct macb_dma_desc); - } return desc_size; -#endif - return sizeof(struct macb_dma_desc); } static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) { -#ifdef MACB_EXT_DESC - switch (bp->hw_dma_cap) { - case HW_DMA_CAP_64B: - case HW_DMA_CAP_PTP: - desc_idx <<= 1; - break; - case HW_DMA_CAP_64B_PTP: - desc_idx *= 3; - break; - default: - break; - } -#endif - return desc_idx; + return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp)); } -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) { return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); } -#endif /* Ring buffer accessors */ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) @@ -357,7 +327,6 @@ static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 
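The macb_adj_dma_desc_idx() rewrite above replaces the old hw_dma_cap switch with plain arithmetic: each enabled descriptor extension (64-bit addressing, PTP timestamps) adds one descriptor-sized slot per ring entry, so the index scales by 1 + macb_dma64() + macb_dma_ptp(). A stand-alone userspace sketch of that arithmetic — the adj_desc_idx() helper is invented for illustration, not driver code — relying on C booleans promoting to 0 or 1:

#include <stdbool.h>
#include <stdio.h>

static unsigned int adj_desc_idx(unsigned int idx, bool dma64, bool ptp)
{
        /* bool promotes to 0/1, mirroring the driver's expression */
        return idx * (1 + dma64 + ptp);
}

int main(void)
{
        printf("%u\n", adj_desc_idx(5, false, false)); /* 5: base descriptor only */
        printf("%u\n", adj_desc_idx(5, true, false));  /* 10: + 64-bit address word */
        printf("%u\n", adj_desc_idx(5, false, true));  /* 10: + PTP timestamp words */
        printf("%u\n", adj_desc_idx(5, true, true));   /* 15: both extensions */
        return 0;
}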
mdio_read_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -403,7 +372,6 @@ static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad, status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); mdio_read_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -435,7 +403,6 @@ static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, goto mdio_write_exit; mdio_write_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -481,7 +448,6 @@ static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id, goto mdio_write_exit; mdio_write_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -492,15 +458,13 @@ static void macb_init_buffers(struct macb *bp) struct macb_queue *queue; unsigned int q; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* Single register for all queues' high 32 bits. */ - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { + if (macb_dma64(bp)) { macb_writel(bp, RBQPH, upper_32_bits(bp->queues[0].rx_ring_dma)); macb_writel(bp, TBQPH, upper_32_bits(bp->queues[0].tx_ring_dma)); } -#endif for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); @@ -1025,10 +989,9 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budge static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) { -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - struct macb_dma_desc_64 *desc_64; + if (macb_dma64(bp)) { + struct macb_dma_desc_64 *desc_64; - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { desc_64 = macb_64b_desc(bp, desc); desc_64->addrh = upper_32_bits(addr); /* The low bits of RX address contain the RX_USED bit, clearing @@ -1037,26 +1000,23 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_ */ dma_wmb(); } -#endif + desc->addr = lower_32_bits(addr); } static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) { dma_addr_t addr = 0; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - struct macb_dma_desc_64 *desc_64; - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { + if (macb_dma64(bp)) { + struct macb_dma_desc_64 *desc_64; + desc_64 = macb_64b_desc(bp, desc); addr = ((u64)(desc_64->addrh) << 32); } -#endif addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); -#ifdef CONFIG_MACB_USE_HWSTAMP - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) + if (macb_dma_ptp(bp)) addr &= ~GEM_BIT(DMA_RXVALID); -#endif return addr; } @@ -1336,8 +1296,19 @@ static void gem_rx_refill(struct macb_queue *queue) dma_wmb(); macb_set_addr(bp, desc, paddr); - /* properly align Ethernet header */ - skb_reserve(skb, NET_IP_ALIGN); + /* Properly align Ethernet header. + * + * Hardware can add dummy bytes if asked using the RBOF + * field inside the NCFGR register. That feature isn't + * available if hardware is RSC capable. + * + * We cannot fall back to doing the 2-byte shift before + * DMA mapping because the address field does not allow + * setting the low 2/3 bits. + * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
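The comment above leans on the usual NET_IP_ALIGN trick: an Ethernet header is 14 bytes, so shifting the frame start by two bytes lands the IP header on a four-byte boundary. A minimal userspace illustration of that arithmetic (local defines standing in for the kernel's, not driver code):

#include <stdio.h>

#define ETH_HLEN        14      /* Ethernet header length */
#define NET_IP_ALIGN    2       /* padding that realigns the IP header */

int main(void)
{
        unsigned long base = 0x1000;    /* assume a 4-byte-aligned buffer */

        printf("no shift: IP header at %#lx, offset mod 4 = %lu\n",
               base + ETH_HLEN, (base + ETH_HLEN) % 4);
        printf("2B shift: IP header at %#lx, offset mod 4 = %lu\n",
               base + NET_IP_ALIGN + ETH_HLEN,
               (base + NET_IP_ALIGN + ETH_HLEN) % 4);
        return 0;
}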
+ */ + if (!(bp->caps & MACB_CAPS_RSC)) + skb_reserve(skb, NET_IP_ALIGN); } else { desc->ctrl = 0; dma_wmb(); @@ -2024,14 +1995,14 @@ static unsigned int macb_tx_map(struct macb *bp, struct sk_buff *skb, unsigned int hdrlen) { - dma_addr_t mapping; - unsigned int len, entry, i, tx_head = queue->tx_head; - struct macb_tx_skb *tx_skb = NULL; - struct macb_dma_desc *desc; - unsigned int offset, size, count = 0; unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int eof = 1, mss_mfs = 0; + unsigned int len, i, tx_head = queue->tx_head; u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; + unsigned int eof = 1, mss_mfs = 0; + struct macb_tx_skb *tx_skb = NULL; + struct macb_dma_desc *desc; + unsigned int offset, size; + dma_addr_t mapping; /* LSO */ if (skb_shinfo(skb)->gso_size != 0) { @@ -2051,8 +2022,7 @@ static unsigned int macb_tx_map(struct macb *bp, offset = 0; while (len) { - entry = macb_tx_ring_wrap(bp, tx_head); - tx_skb = &queue->tx_skb[entry]; + tx_skb = macb_tx_skb(queue, tx_head); mapping = dma_map_single(&bp->pdev->dev, skb->data + offset, @@ -2068,10 +2038,9 @@ static unsigned int macb_tx_map(struct macb *bp, len -= size; offset += size; - count++; tx_head++; - size = min(len, bp->max_tx_length); + size = umin(len, bp->max_tx_length); } /* Then, map paged data from fragments */ @@ -2081,9 +2050,8 @@ static unsigned int macb_tx_map(struct macb *bp, len = skb_frag_size(frag); offset = 0; while (len) { - size = min(len, bp->max_tx_length); - entry = macb_tx_ring_wrap(bp, tx_head); - tx_skb = &queue->tx_skb[entry]; + size = umin(len, bp->max_tx_length); + tx_skb = macb_tx_skb(queue, tx_head); mapping = skb_frag_dma_map(&bp->pdev->dev, frag, offset, size, DMA_TO_DEVICE); @@ -2098,7 +2066,6 @@ static unsigned int macb_tx_map(struct macb *bp, len -= size; offset += size; - count++; tx_head++; } } @@ -2120,9 +2087,8 @@ static unsigned int macb_tx_map(struct macb *bp, * to set the end of TX queue */ i = tx_head; - entry = macb_tx_ring_wrap(bp, i); ctrl = MACB_BIT(TX_USED); - desc = macb_tx_desc(queue, entry); + desc = macb_tx_desc(queue, i); desc->ctrl = ctrl; if (lso_ctrl) { @@ -2142,16 +2108,15 @@ static unsigned int macb_tx_map(struct macb *bp, do { i--; - entry = macb_tx_ring_wrap(bp, i); - tx_skb = &queue->tx_skb[entry]; - desc = macb_tx_desc(queue, entry); + tx_skb = macb_tx_skb(queue, i); + desc = macb_tx_desc(queue, i); ctrl = (u32)tx_skb->size; if (eof) { ctrl |= MACB_BIT(TX_LAST); eof = 0; } - if (unlikely(entry == (bp->tx_ring_size - 1))) + if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1)) ctrl |= MACB_BIT(TX_WRAP); /* First descriptor is header descriptor */ @@ -2179,7 +2144,7 @@ static unsigned int macb_tx_map(struct macb *bp, queue->tx_head = tx_head; - return count; + return 0; dma_error: netdev_err(bp->dev, "TX DMA map failed\n"); @@ -2190,7 +2155,7 @@ dma_error: macb_tx_unmap(bp, tx_skb, 0); } - return 0; + return -ENOMEM; } static netdev_features_t macb_features_check(struct sk_buff *skb, @@ -2318,11 +2283,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -#ifdef CONFIG_MACB_USE_HWSTAMP - if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - (bp->hw_dma_cap & HW_DMA_CAP_PTP)) + if (macb_dma_ptp(bp) && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -#endif is_lso = (skb_shinfo(skb)->gso_size != 0); @@ -2339,7 +2302,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } } else - hdrlen = 
min(skb_headlen(skb), bp->max_tx_length); + hdrlen = umin(skb_headlen(skb), bp->max_tx_length); #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, @@ -2378,7 +2341,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Map socket buffer for DMA transfer */ - if (!macb_tx_map(bp, queue, skb, hdrlen)) { + if (macb_tx_map(bp, queue, skb, hdrlen)) { dev_kfree_skb_any(skb); goto unlock; } @@ -2799,14 +2762,10 @@ static void macb_configure_dma(struct macb *bp) dmacfg &= ~GEM_BIT(TXCOEN); dmacfg &= ~GEM_BIT(ADDR64); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) + if (macb_dma64(bp)) dmacfg |= GEM_BIT(ADDR64); -#endif -#ifdef CONFIG_MACB_USE_HWSTAMP - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) + if (macb_dma_ptp(bp)) dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); -#endif netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", dmacfg); gem_writel(bp, DMACFG, dmacfg); @@ -2821,7 +2780,11 @@ static void macb_init_hw(struct macb *bp) macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); - config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ + /* Make eth data aligned. + * If RSC capable, that offset is ignored by HW. + */ + if (!(bp->caps & MACB_CAPS_RSC)) + config |= MACB_BF(RBOF, NET_IP_ALIGN); config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ if (bp->caps & MACB_CAPS_JUMBO) config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ @@ -2998,7 +2961,11 @@ static int macb_open(struct net_device *dev) macb_init_hw(bp); - err = phy_power_on(bp->sgmii_phy); + err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface); + if (err) + goto reset_hw; + + err = phy_power_on(bp->phy); if (err) goto reset_hw; @@ -3014,7 +2981,7 @@ static int macb_open(struct net_device *dev) return 0; phy_off: - phy_power_off(bp->sgmii_phy); + phy_power_off(bp->phy); reset_hw: macb_reset_hw(bp); @@ -3046,7 +3013,7 @@ static int macb_close(struct net_device *dev) phylink_stop(bp->phylink); phylink_disconnect_phy(bp->phylink); - phy_power_off(bp->sgmii_phy); + phy_power_off(bp->phy); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -3582,7 +3549,7 @@ static int gem_get_ts_info(struct net_device *dev, { struct macb *bp = netdev_priv(dev); - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { + if (!macb_dma_ptp(bp)) { ethtool_op_get_ts_info(dev, info); return 0; } @@ -4108,6 +4075,8 @@ static int macb_taprio_setup_replace(struct net_device *ndev, struct macb *bp = netdev_priv(ndev); struct ethtool_link_ksettings kset; struct macb_queue *queue; + u32 queue_mask; + u8 queue_id; size_t i; int err; @@ -4159,8 +4128,9 @@ static int macb_taprio_setup_replace(struct net_device *ndev, goto cleanup; } - /* gate_mask must not select queues outside the valid queue_mask */ - if (entry->gate_mask & ~bp->queue_mask) { + /* gate_mask must not select queues outside the valid queues */ + queue_id = order_base_2(entry->gate_mask); + if (queue_id >= bp->num_queues) { netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n", i, entry->gate_mask, bp->num_queues); err = -EINVAL; @@ -4194,7 +4164,7 @@ static int macb_taprio_setup_replace(struct net_device *ndev, goto cleanup; } - enst_queue[i].queue_id = order_base_2(entry->gate_mask); + enst_queue[i].queue_id = queue_id; enst_queue[i].start_time_mask = (start_time_sec << GEM_START_TIME_SEC_OFFSET) | start_time_nsec; @@ -4222,8 +4192,9 @@ static int macb_taprio_setup_replace(struct net_device *ndev, /* All validations passed - proceed with hardware configuration */ 
scoped_guard(spinlock_irqsave, &bp->lock) { /* Disable ENST queues if running before configuring */ + queue_mask = BIT_U32(bp->num_queues) - 1; gem_writel(bp, ENST_CONTROL, - bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); for (i = 0; i < conf->num_entries; i++) { queue = &bp->queues[enst_queue[i].queue_id]; @@ -4252,15 +4223,16 @@ static void macb_taprio_destroy(struct net_device *ndev) { struct macb *bp = netdev_priv(ndev); struct macb_queue *queue; - u32 enst_disable_mask; + u32 queue_mask; unsigned int q; netdev_reset_tc(ndev); - enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET; + queue_mask = BIT_U32(bp->num_queues) - 1; scoped_guard(spinlock_irqsave, &bp->lock) { /* Single disable command for all queues */ - gem_writel(bp, ENST_CONTROL, enst_disable_mask); + gem_writel(bp, ENST_CONTROL, + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); /* Clear all queue ENST registers in batch */ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { @@ -4364,13 +4336,15 @@ static void macb_configure_caps(struct macb *bp, dcfg = gem_readl(bp, DCFG2); if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) bp->caps |= MACB_CAPS_FIFO_MODE; + if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6))) + bp->caps |= MACB_CAPS_RSC; if (gem_has_ptp(bp)) { if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) dev_err(&bp->pdev->dev, "GEM doesn't support hardware ptp.\n"); else { #ifdef CONFIG_MACB_USE_HWSTAMP - bp->hw_dma_cap |= HW_DMA_CAP_PTP; + bp->caps |= MACB_CAPS_DMA_PTP; bp->ptp_info = &gem_ptp_info; #endif } @@ -4383,26 +4357,25 @@ static void macb_configure_caps(struct macb *bp, dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); } -static void macb_probe_queues(void __iomem *mem, - bool native_io, - unsigned int *queue_mask, - unsigned int *num_queues) +static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io) { - *queue_mask = 0x1; - *num_queues = 1; + /* BIT(0) is never set but queue 0 always exists. */ + unsigned int queue_mask = 0x1; - /* is it macb or gem ? - * - * We need to read directly from the hardware here because - * we are early in the probe process and don't have the - * MACB_CAPS_MACB_IS_GEM flag positioned - */ - if (!hw_is_gem(mem, native_io)) - return; + /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */ + if (hw_is_gem(mem, native_io)) { + if (native_io) + queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF; + else + queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF; - /* bit 0 is never set but queue 0 always exists */ - *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; - *num_queues = hweight32(*queue_mask); + if (fls(queue_mask) != ffz(queue_mask)) { + dev_err(dev, "queue mask %#x has a hole\n", queue_mask); + return -EINVAL; + } + } + + return hweight32(queue_mask); } static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, @@ -4520,10 +4493,7 @@ static int macb_init(struct platform_device *pdev) * register mapping but we don't want to test the queue index then * compute the corresponding register offset at run time. 
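The macb_probe_queues() rewrite above rejects a queue mask with a hole by comparing fls() (highest set bit, 1-based) with ffz() (lowest clear bit, 0-based): the two agree exactly when the mask is a contiguous run starting at bit 0. A userspace sketch of the same predicate, with compiler builtins standing in for the kernel helpers (fls_u32/ffz_u32 are invented names):

#include <stdio.h>

/* fls(): index of the highest set bit, 1-based; 0 for an empty mask */
static int fls_u32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

/* ffz(): index of the lowest clear bit, 0-based (kernel semantics) */
static int ffz_u32(unsigned int x)
{
        return __builtin_ctz(~x);
}

int main(void)
{
        unsigned int masks[] = { 0x1, 0x7, 0xf, 0x5, 0xd };

        for (unsigned int i = 0; i < 5; i++)
                printf("%#x -> %s\n", masks[i],
                       fls_u32(masks[i]) != ffz_u32(masks[i]) ?
                       "has a hole" : "contiguous");
        return 0;
}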
*/ - for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { - if (!(bp->queue_mask & (1 << hw_q))) - continue; - + for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) { queue = &bp->queues[q]; queue->bp = bp; spin_lock_init(&queue->tx_ptr_lock); @@ -4594,8 +4564,11 @@ static int macb_init(struct platform_device *pdev) /* Set features */ dev->hw_features = NETIF_F_SG; - /* Check LSO capability */ - if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) + /* Check LSO capability; runtime detection can be overridden by a cap + * flag if the hardware is known to be buggy + */ + if (!(bp->caps & MACB_CAPS_NO_LSO) && + GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) dev->hw_features |= MACB_NETIF_LSO; /* Checksum offload is only available on gem with packet buffer */ @@ -4614,8 +4587,8 @@ static int macb_init(struct platform_device *pdev) * each 4-tuple define requires 1 T2 screener reg + 3 compare regs */ reg = gem_readl(bp, DCFG8); - bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), - GEM_BFEXT(T2SCR, reg)); + bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3), + GEM_BFEXT(T2SCR, reg)); INIT_LIST_HEAD(&bp->rx_fs_list.list); if (bp->max_tuples > 0) { /* also needs one ethtype match to check IPv4 */ @@ -5168,13 +5141,13 @@ static int init_reset_optional(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Ensure PHY device used in SGMII mode is ready */ - bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); + bp->phy = devm_phy_optional_get(&pdev->dev, NULL); - if (IS_ERR(bp->sgmii_phy)) - return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy), + if (IS_ERR(bp->phy)) + return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy), "failed to get SGMII PHY\n"); - ret = phy_init(bp->sgmii_phy); + ret = phy_init(bp->phy); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to init SGMII PHY\n"); @@ -5203,7 +5176,7 @@ static int init_reset_optional(struct platform_device *pdev) /* Fully reset controller at hardware level if mapped in device tree */ ret = device_reset_optional(&pdev->dev); if (ret) { - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); return dev_err_probe(&pdev->dev, ret, "failed to reset controller"); } @@ -5211,8 +5184,30 @@ static int init_reset_optional(struct platform_device *pdev) err_out_phy_exit: if (ret) - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); + + return ret; +} + +static int eyeq5_init(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + struct device *dev = &pdev->dev; + int ret; + + bp->phy = devm_phy_get(dev, NULL); + if (IS_ERR(bp->phy)) + return dev_err_probe(dev, PTR_ERR(bp->phy), + "failed to get PHY\n"); + + ret = phy_init(bp->phy); + if (ret) + return dev_err_probe(dev, ret, "failed to init PHY\n"); + ret = macb_init(pdev); + if (ret) + phy_exit(bp->phy); return ret; } @@ -5370,6 +5365,17 @@ static const struct macb_config versal_config = { .usrio = &macb_default_usrio, }; +static const struct macb_config eyeq5_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE | + MACB_CAPS_NO_LSO, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = eyeq5_init, + .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, +}; + static const struct macb_config raspberrypi_rp1_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | MACB_CAPS_JUMBO | @@ -5401,6 +5407,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "microchip,mpfs-macb", .data = 
&mpfs_config }, { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config }, { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config }, + { .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config }, { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config }, { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "xlnx,zynq-gem", .data = &zynq_config }, @@ -5424,21 +5431,17 @@ static const struct macb_config default_gem_config = { static int macb_probe(struct platform_device *pdev) { const struct macb_config *macb_config = &default_gem_config; - int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **, struct clk **, - struct clk **) = macb_config->clk_init; - int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; struct clk *tsu_clk = NULL; - unsigned int queue_mask, num_queues; - bool native_io; phy_interface_t interface; struct net_device *dev; struct resource *regs; u32 wtrmrk_rst_val; void __iomem *mem; struct macb *bp; + int num_queues; + bool native_io; int err, val; mem = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); @@ -5449,14 +5452,11 @@ static int macb_probe(struct platform_device *pdev) const struct of_device_id *match; match = of_match_node(macb_dt_ids, np); - if (match && match->data) { + if (match && match->data) macb_config = match->data; - clk_init = macb_config->clk_init; - init = macb_config->init; - } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); + err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); if (err) return err; @@ -5467,7 +5467,12 @@ static int macb_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); native_io = hw_is_native_io(mem); - macb_probe_queues(mem, native_io, &queue_mask, &num_queues); + num_queues = macb_probe_queues(&pdev->dev, mem, native_io); + if (num_queues < 0) { + err = num_queues; + goto err_disable_clocks; + } + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); if (!dev) { err = -ENOMEM; @@ -5491,16 +5496,13 @@ static int macb_probe(struct platform_device *pdev) bp->macb_reg_writel = hw_writel; } bp->num_queues = num_queues; - bp->queue_mask = queue_mask; - if (macb_config) - bp->dma_burst_length = macb_config->dma_burst_length; + bp->dma_burst_length = macb_config->dma_burst_length; bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; bp->rx_clk = rx_clk; bp->tsu_clk = tsu_clk; - if (macb_config) - bp->jumbo_max_len = macb_config->jumbo_max_len; + bp->jumbo_max_len = macb_config->jumbo_max_len; if (!hw_is_gem(bp->regs, bp->native_io)) bp->max_tx_length = MACB_MAX_TX_LEN; @@ -5546,7 +5548,7 @@ static int macb_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to set DMA mask\n"); goto err_out_free_netdev; } - bp->hw_dma_cap |= HW_DMA_CAP_64B; + bp->caps |= MACB_CAPS_DMA_64B; } #endif platform_set_drvdata(pdev, dev); @@ -5594,7 +5596,7 @@ static int macb_probe(struct platform_device *pdev) bp->phy_interface = interface; /* IP specific init */ - err = init(pdev); + err = macb_config->init(pdev); if (err) goto err_out_free_netdev; @@ -5616,7 +5618,6 @@ static int macb_probe(struct platform_device *pdev) macb_is_gem(bp) ? 
"GEM" : "MACB", macb_readl(bp, MID), dev->base_addr, dev->irq, dev->dev_addr); - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); return 0; @@ -5626,7 +5627,7 @@ err_out_unregister_mdio: mdiobus_free(bp->mii_bus); err_out_phy_exit: - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); err_out_free_netdev: free_netdev(dev); @@ -5650,7 +5651,7 @@ static void macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); unregister_netdev(dev); - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); @@ -5677,7 +5678,7 @@ static int __maybe_unused macb_suspend(struct device *dev) u32 tmp; if (!device_may_wakeup(&bp->dev->dev)) - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); if (!netif_running(netdev)) return 0; @@ -5806,7 +5807,7 @@ static int __maybe_unused macb_resume(struct device *dev) int err; if (!device_may_wakeup(&bp->dev->dev)) - phy_init(bp->sgmii_phy); + phy_init(bp->phy); if (!netif_running(netdev)) return 0; diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index a63bf29c4fa8..c9e77819196e 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -28,14 +28,16 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, struct macb_dma_desc *desc) { - if (bp->hw_dma_cap == HW_DMA_CAP_PTP) - return (struct macb_dma_desc_ptp *) - ((u8 *)desc + sizeof(struct macb_dma_desc)); - if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP) + if (!macb_dma_ptp(bp)) + return NULL; + + if (macb_dma64(bp)) return (struct macb_dma_desc_ptp *) ((u8 *)desc + sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64)); - return NULL; + else + return (struct macb_dma_desc_ptp *) + ((u8 *)desc + sizeof(struct macb_dma_desc)); } static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, @@ -380,7 +382,7 @@ int gem_get_hwtst(struct net_device *dev, struct macb *bp = netdev_priv(dev); *tstamp_config = bp->tstamp_config; - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) + if (!macb_dma_ptp(bp)) return -EOPNOTSUPP; return 0; @@ -407,7 +409,7 @@ int gem_set_hwtst(struct net_device *dev, struct macb *bp = netdev_priv(dev); u32 regval; - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) + if (!macb_dma_ptp(bp)) return -EOPNOTSUPP; switch (tstamp_config->tx_type) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8e2fcec26ea1..0732440eeacd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2107,20 +2107,16 @@ liquidio_get_stats64(struct net_device *netdev, lstats->tx_fifo_errors; } -/** - * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl - * @netdev: network device - * @ifr: interface request - */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +static int liquidio_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *conf, + struct netlink_ext_ack *extack) { - struct hwtstamp_config conf; struct lio *lio = GET_LIO(netdev); - if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) - return -EFAULT; + if (!lio->oct_dev->ptp_enable) + return -EOPNOTSUPP; - switch (conf.tx_type) { + switch (conf->tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; @@ -2128,7 +2124,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (conf.rx_filter) { + switch (conf->rx_filter) { case 
HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -2146,39 +2142,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - conf.rx_filter = HWTSTAMP_FILTER_ALL; + conf->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + if (conf->rx_filter == HWTSTAMP_FILTER_ALL) ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); else ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); - return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; + return 0; } -/** - * liquidio_ioctl - ioctl handler - * @netdev: network device - * @ifr: interface request - * @cmd: command - */ -static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int liquidio_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *conf) { struct lio *lio = GET_LIO(netdev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (lio->oct_dev->ptp_enable) - return hwtstamp_ioctl(netdev, ifr); - fallthrough; - default: - return -EOPNOTSUPP; - } + /* TX timestamping is technically always on */ + conf->tx_type = HWTSTAMP_TX_ON; + conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return 0; } /** @@ -3227,7 +3216,6 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, .ndo_set_vf_mac = liquidio_set_vf_mac, @@ -3238,6 +3226,8 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, .ndo_get_port_parent_id = liquidio_get_port_parent_id, + .ndo_hwtstamp_get = liquidio_hwtstamp_get, + .ndo_hwtstamp_set = liquidio_hwtstamp_set, }; /** diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 3230dff5ba05..e02942dbbcce 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1236,20 +1236,13 @@ liquidio_get_stats64(struct net_device *netdev, lstats->tx_carrier_errors; } -/** - * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl - * @netdev: network device - * @ifr: interface request - */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +static int liquidio_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *conf, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); - struct hwtstamp_config conf; - - if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) - return -EFAULT; - switch (conf.tx_type) { + switch (conf->tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; @@ -1257,7 +1250,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (conf.rx_filter) { + switch (conf->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -1275,35 +1268,31 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - conf.rx_filter = HWTSTAMP_FILTER_ALL; + conf->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return 
-ERANGE; } - if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + if (conf->rx_filter == HWTSTAMP_FILTER_ALL) ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); else ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); - return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; + return 0; } -/** - * liquidio_ioctl - ioctl handler - * @netdev: network device - * @ifr: interface request - * @cmd: command - */ -static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int liquidio_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *conf) { - switch (cmd) { - case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr); - default: - return -EOPNOTSUPP; - } + struct lio *lio = GET_LIO(netdev); + + /* TX timestamping is technically always on */ + conf->tx_type = HWTSTAMP_TX_ON; + conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + return 0; } static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) @@ -1881,9 +1870,10 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, + .ndo_hwtstamp_get = liquidio_hwtstamp_get, + .ndo_hwtstamp_set = liquidio_hwtstamp_set, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 393b9951490a..c190fc6538d4 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -690,19 +690,16 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) return IRQ_HANDLED; } -static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, - struct ifreq *rq, int cmd) +static int octeon_mgmt_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct octeon_mgmt *p = netdev_priv(netdev); - struct hwtstamp_config config; - union cvmx_mio_ptp_clock_cfg ptp; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mio_ptp_clock_cfg ptp; bool have_hw_timestamps = false; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - /* Check the status of hardware for tiemstamps */ + /* Check the status of hardware for timestamps */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); @@ -733,10 +730,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, have_hw_timestamps = true; } - if (!have_hw_timestamps) + if (!have_hw_timestamps) { + NL_SET_ERR_MSG_MOD(extack, "HW doesn't support timestamping"); return -EINVAL; + } - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -744,7 +743,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: p->has_rx_tstamp = false; rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); @@ -766,33 +765,34 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - p->has_rx_tstamp
= have_hw_timestamps; - config.rx_filter = HWTSTAMP_FILTER_ALL; - if (p->has_rx_tstamp) { - rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); - rxx_frm_ctl.s.ptp_mode = 1; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - } + p->has_rx_tstamp = true; + config->rx_filter = HWTSTAMP_FILTER_ALL; + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); break; default: return -ERANGE; } - if (copy_to_user(rq->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } -static int octeon_mgmt_ioctl(struct net_device *netdev, - struct ifreq *rq, int cmd) +static int octeon_mgmt_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - switch (cmd) { - case SIOCSHWTSTAMP: - return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); - default: - return phy_do_ioctl(netdev, rq, cmd); - } + struct octeon_mgmt *p = netdev_priv(netdev); + + /* Check the status of hardware for timestamps */ + if (!OCTEON_IS_MODEL(OCTEON_CN6XXX)) + return -EINVAL; + + config->tx_type = HWTSTAMP_TX_ON; + config->rx_filter = p->has_rx_tstamp ? + HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static void octeon_mgmt_disable_link(struct octeon_mgmt *p) @@ -1370,11 +1370,13 @@ static const struct net_device_ops octeon_mgmt_ops = { .ndo_start_xmit = octeon_mgmt_xmit, .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_eth_ioctl = octeon_mgmt_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, #endif + .ndo_hwtstamp_get = octeon_mgmt_hwtstamp_get, + .ndo_hwtstamp_set = octeon_mgmt_hwtstamp_set, }; static int octeon_mgmt_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 1be2dc40a1a6..0b6e30a8feb0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1899,18 +1899,18 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) } } -static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +static int nicvf_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; struct nicvf *nic = netdev_priv(netdev); - if (!nic->ptp_clock) + if (!nic->ptp_clock) { + NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported"); return -ENODEV; + } - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -1918,7 +1918,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: nic->hw_rx_tstamp = false; break; @@ -1937,7 +1937,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: nic->hw_rx_tstamp = true; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; @@ -1946,20 +1946,24 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (netif_running(netdev)) 
nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp); - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } -static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +static int nicvf_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - switch (cmd) { - case SIOCSHWTSTAMP: - return nicvf_config_hwtstamp(netdev, req); - default: - return -EOPNOTSUPP; - } + struct nicvf *nic = netdev_priv(netdev); + + if (!nic->ptp_clock) + return -ENODEV; + + /* TX timestamping is technically always on */ + config->tx_type = HWTSTAMP_TX_ON; + config->rx_filter = nic->hw_rx_tstamp ? + HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, @@ -2081,8 +2085,9 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_eth_ioctl = nicvf_ioctl, .ndo_set_rx_mode = nicvf_set_rx_mode, + .ndo_hwtstamp_get = nicvf_hwtstamp_get, + .ndo_hwtstamp_set = nicvf_hwtstamp_set, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0d85198fb03d..f20f4bc58492 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -674,7 +674,7 @@ struct port_info { struct cxgb_fcoe fcoe; #endif /* CONFIG_CHELSIO_T4_FCOE */ bool rxtstamp; /* Enable TS */ - struct hwtstamp_config tstamp_config; + struct kernel_hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; u32 eth_flags; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 392723ef14e5..7e2283c95b97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3042,12 +3042,87 @@ static void cxgb_get_stats(struct net_device *dev, ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; } +static int cxgb_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct port_info *pi = netdev_priv(dev); + + *config = pi->tstamp_config; + return 0; +} + +static int cxgb_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (is_t4(adapter->params.chip)) { + /* For T4 Adapters */ + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pi->rxtstamp = false; + break; + case HWTSTAMP_FILTER_ALL: + pi->rxtstamp = true; + break; + default: + return -ERANGE; + } + pi->tstamp_config = *config; + return 0; + } + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pi->rxtstamp = false; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pi->rxtstamp = true; + break; + default: + return 
-ERANGE; + } + + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_NONE) { + if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) + pi->ptp_enable = false; + } + + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + if (cxgb4_ptp_redirect_rx_packet(adapter, pi) >= 0) + pi->ptp_enable = true; + } + pi->tstamp_config = *config; + return 0; +} + static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { unsigned int mbox; int ret = 0, prtad, devad; struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; switch (cmd) { @@ -3076,81 +3151,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, data->reg_num, data->val_in); break; - case SIOCGHWTSTAMP: - return copy_to_user(req->ifr_data, &pi->tstamp_config, - sizeof(pi->tstamp_config)) ? - -EFAULT : 0; - case SIOCSHWTSTAMP: - if (copy_from_user(&pi->tstamp_config, req->ifr_data, - sizeof(pi->tstamp_config))) - return -EFAULT; - - if (!is_t4(adapter->params.chip)) { - switch (pi->tstamp_config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (pi->tstamp_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - pi->rxtstamp = false; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - cxgb4_ptprx_timestamping(pi, pi->port_id, - PTP_TS_L4); - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - cxgb4_ptprx_timestamping(pi, pi->port_id, - PTP_TS_L2_L4); - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - pi->rxtstamp = true; - break; - default: - pi->tstamp_config.rx_filter = - HWTSTAMP_FILTER_NONE; - return -ERANGE; - } - - if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && - (pi->tstamp_config.rx_filter == - HWTSTAMP_FILTER_NONE)) { - if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) - pi->ptp_enable = false; - } - - if (pi->tstamp_config.rx_filter != - HWTSTAMP_FILTER_NONE) { - if (cxgb4_ptp_redirect_rx_packet(adapter, - pi) >= 0) - pi->ptp_enable = true; - } - } else { - /* For T4 Adapters */ - switch (pi->tstamp_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - pi->rxtstamp = false; - break; - case HWTSTAMP_FILTER_ALL: - pi->rxtstamp = true; - break; - default: - pi->tstamp_config.rx_filter = - HWTSTAMP_FILTER_NONE; - return -ERANGE; - } - } - return copy_to_user(req->ifr_data, &pi->tstamp_config, - sizeof(pi->tstamp_config)) ? 
- -EFAULT : 0; default: return -EOPNOTSUPP; } @@ -3875,6 +3875,8 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_setup_tc = cxgb_setup_tc, .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, + .ndo_hwtstamp_get = cxgb_hwtstamp_get, + .ndo_hwtstamp_set = cxgb_hwtstamp_set, }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 0765d000eaef..e2b5554531b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -161,20 +161,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, static void cxgb4_process_flow_match(struct net_device *dev, struct flow_rule *rule, + u16 addr_type, struct ch_filter_specification *fs) { - u16 addr_type = 0; - - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_match_control match; - - flow_rule_match_control(rule, &match); - addr_type = match.key->addr_type; - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -327,9 +316,6 @@ static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack, return -EOPNOTSUPP; } - if (flow_rule_match_has_control_flags(rule, extack)) - return -EOPNOTSUPP; - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -858,6 +844,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, { struct adapter *adap = netdev2adap(dev); struct filter_ctx ctx; + u16 addr_type = 0; u8 inet_family; int fidx, ret; @@ -867,7 +854,28 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, if (cxgb4_validate_flow_match(extack, rule)) return -EOPNOTSUPP; - cxgb4_process_flow_match(dev, rule, fs); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { + fs->val.frag = match.key->flags & FLOW_DIS_IS_FRAGMENT; + fs->mask.frag = true; + } + + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT, + match.mask->flags, extack)) + return -EOPNOTSUPP; + + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + + cxgb4_process_flow_match(dev, rule, addr_type, fs); cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 4ee970f3bad6..ee0154337a9c 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1199,12 +1199,12 @@ static struct sock *chtls_recv_sock(struct sock *lsk, struct ipv6_pinfo *newnp = inet6_sk(newsk); struct ipv6_pinfo *np = inet6_sk(lsk); - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; + newinet->pinet6 = &newtcp6sk->inet6; + newinet->ipv6_fl_list = NULL; memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 
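/*
 * [Editor's note -- illustration, not part of the patch] The recurring
 * shape of the timestamping hunks above: drivers drop their
 * SIOCGHWTSTAMP/SIOCSHWTSTAMP handling from .ndo_eth_ioctl and expose
 * the dedicated ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks
 * instead. The core now performs the copy_from_user()/copy_to_user()
 * round trip, so a driver only validates and stores a
 * struct kernel_hwtstamp_config. Minimal sketch for a hypothetical
 * "foo" driver; the callback signatures match the hunks above, the
 * rest is illustrative.
 */
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	struct kernel_hwtstamp_config tstamp_config;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* no copy_to_user(): the core copies the result back */
	*config = priv->tstamp_config;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
		break;
	default:
		return -ERANGE;	/* convention used by the drivers above */
	}

	priv->tstamp_config = *config;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};
/* Userspace is unaffected: the SIOC[GS]HWTSTAMP ioctls keep working and
 * are dispatched to these callbacks by the core.
 */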
newsk->sk_v6_daddr = treq->ir_v6_rmt_addr; newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr; inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr; - newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newsk->sk_bound_dev_if = treq->ir_iif; newinet->inet_opt = NULL; diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index f188fba021a6..03e19aea9ea4 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -176,7 +176,7 @@ struct tsnep_adapter { struct tsnep_gcl gcl[2]; int next_gcl; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; /* ptp clock lock */ @@ -203,7 +203,11 @@ extern const struct ethtool_ops tsnep_ethtool_ops; int tsnep_ptp_init(struct tsnep_adapter *adapter); void tsnep_ptp_cleanup(struct tsnep_adapter *adapter); -int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int tsnep_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int tsnep_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); int tsnep_tc_init(struct tsnep_adapter *adapter); void tsnep_tc_cleanup(struct tsnep_adapter *adapter); diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index eba73246f986..b118407c30e8 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -2168,16 +2168,6 @@ static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb, return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); } -static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) -{ - if (!netif_running(netdev)) - return -EINVAL; - if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP) - return tsnep_ptp_ioctl(netdev, ifr, cmd); - return phy_mii_ioctl(netdev->phydev, ifr, cmd); -} - static void tsnep_netdev_set_multicast(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); @@ -2384,7 +2374,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, .ndo_start_xmit = tsnep_netdev_xmit_frame, - .ndo_eth_ioctl = tsnep_netdev_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = tsnep_netdev_set_multicast, .ndo_get_stats64 = tsnep_netdev_get_stats64, .ndo_set_mac_address = tsnep_netdev_set_mac_address, @@ -2394,6 +2384,8 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_bpf = tsnep_netdev_bpf, .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup, + .ndo_hwtstamp_get = tsnep_ptp_hwtstamp_get, + .ndo_hwtstamp_set = tsnep_ptp_hwtstamp_set, }; static int tsnep_mac_init(struct tsnep_adapter *adapter) diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c index 54fbf0126815..ae1308eb813d 100644 --- a/drivers/net/ethernet/engleder/tsnep_ptp.c +++ b/drivers/net/ethernet/engleder/tsnep_ptp.c @@ -19,57 +19,53 @@ void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time) *time = (((u64)high) << 32) | ((u64)low); } -int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +int tsnep_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { struct tsnep_adapter *adapter = netdev_priv(netdev); - struct hwtstamp_config config; - - if (!ifr) - return -EINVAL; - - if (cmd == 
SIOCSHWTSTAMP) { - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - memcpy(&adapter->hwtstamp_config, &config, - sizeof(adapter->hwtstamp_config)); + + *config = adapter->hwtstamp_config; + return 0; +} + +int tsnep_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; } - if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, - sizeof(adapter->hwtstamp_config))) - return -EFAULT; + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + adapter->hwtstamp_config = *config; return 0; } diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 6ac8547ef9b8..3c9961806f75 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -196,7 +196,7 @@ enum intr_status_bits { ERI = 0x00000080, /* receive early int */ CNTOVF = 0x00000040, /* counter overflow */ RBU = 0x00000020, /* receive buffer unavailable */ - TBU = 0x00000010, /* transmit buffer unavilable */ + TBU = 0x00000010, /* transmit buffer unavailable */ TI = 0x00000008, /* transmit interrupt */ RI = 0x00000004, /* receive interrupt */ RxErr = 0x00000002, /* receive error */ @@ -215,7 +215,7 @@ enum rx_mode_bits { CR_W_RXMODEMASK = 0x000000e0, CR_W_PROM = 0x00000080, /* promiscuous mode */ CR_W_AB = 0x00000040, /* accept broadcast */ - CR_W_AM = 0x00000020, /* accept mutlicast */ + CR_W_AM = 0x00000020, /* accept multicast */ CR_W_ARP = 0x00000008, /* receive runt pkt */ CR_W_ALP = 0x00000004, /* receive long pkt */ CR_W_SEP = 0x00000002, /* receive error pkt */ diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index bbef47c3480c..e2a591cf9601 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -28,6 +28,7 @@ config FEC depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select PHYLIB + select FIXED_PHY if 
M5272 select PAGE_POOL imply PAGE_POOL_STATS imply NET_SELFTESTS diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 0535e92404e3..d5e5800b84ef 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -14,12 +14,21 @@ u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg) { + /* ENETC with pseudo MAC does not have Ethernet MAC + * port registers. + */ + if (enetc_is_pseudo_mac(si)) + return 0; + return enetc_port_rd(&si->hw, reg); } EXPORT_SYMBOL_GPL(enetc_port_mac_rd); void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val) { + if (enetc_is_pseudo_mac(si)) + return; + enetc_port_wr(&si->hw, reg, val); if (si->hw_features & ENETC_SI_F_QBU) enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val); @@ -3367,7 +3376,8 @@ int enetc_hwtstamp_set(struct net_device *ndev, new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: - if (!enetc_si_is_pf(priv->si)) + if (!enetc_si_is_pf(priv->si) || + enetc_is_pseudo_mac(priv->si)) return -EOPNOTSUPP; new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; @@ -3708,6 +3718,13 @@ static const struct enetc_drvdata enetc4_pf_data = { .eth_ops = &enetc4_pf_ethtool_ops, }; +static const struct enetc_drvdata enetc4_ppm_data = { + .sysclk_freq = ENETC_CLK_333M, + .tx_csum = true, + .max_frags = ENETC4_MAX_SKB_FRAGS, + .eth_ops = &enetc4_ppm_ethtool_ops, +}; + static const struct enetc_drvdata enetc_vf_data = { .sysclk_freq = ENETC_CLK_400M, .max_frags = ENETC_MAX_SKB_FRAGS, @@ -3727,6 +3744,15 @@ static const struct enetc_platform_info enetc_info[] = { .dev_id = ENETC_DEV_ID_VF, .data = &enetc_vf_data, }, + { + .revision = ENETC_REV_4_3, + .dev_id = NXP_ENETC_PPM_DEV_ID, + .data = &enetc4_ppm_data, + }, + { .revision = ENETC_REV_4_3, + .dev_id = NXP_ENETC_PF_DEV_ID, + .data = &enetc4_pf_data, + }, }; int enetc_get_driver_data(struct enetc_si *si) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index f279fa597991..dce27bd67a7d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -273,6 +273,7 @@ enum enetc_errata { #define ENETC_SI_F_QBV BIT(1) #define ENETC_SI_F_QBU BIT(2) #define ENETC_SI_F_LSO BIT(3) +#define ENETC_SI_F_PPM BIT(4) /* pseudo MAC */ struct enetc_drvdata { u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */ @@ -362,6 +363,11 @@ static inline int enetc_pf_to_port(struct pci_dev *pf_pdev) } } +static inline bool enetc_is_pseudo_mac(struct enetc_si *si) +{ + return si->hw_features & ENETC_SI_F_PPM; +} + #define ENETC_MAX_NUM_TXQS 8 #define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) @@ -534,6 +540,8 @@ int enetc_hwtstamp_set(struct net_device *ndev, extern const struct ethtool_ops enetc_pf_ethtool_ops; extern const struct ethtool_ops enetc4_pf_ethtool_ops; extern const struct ethtool_ops enetc_vf_ethtool_ops; +extern const struct ethtool_ops enetc4_ppm_ethtool_ops; + void enetc_set_ethtool_ops(struct net_device *ndev); void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link); void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h index 19bf0e89cdc2..ebea4298791c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h @@ -11,6 +11,7 @@ #define NXP_ENETC_VENDOR_ID 0x1131 #define NXP_ENETC_PF_DEV_ID 0xe101 
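/*
 * [Editor's note -- illustration, not part of the patch] The ENETC
 * hunks above add a "pseudo MAC" (PPM) station interface that has no
 * Ethernet MAC port registers. The pattern is: probe reads the port
 * capability register once, latches the result into si->hw_features,
 * and every MAC-register accessor or MAC-only feature (one-step TX
 * timestamping, loopback, MAC interface-mode config) bails out through
 * the same predicate. Condensed sketch of that flow; the register and
 * flag names are from the patch, the foo_* wrappers are hypothetical.
 */
static void foo_probe_link_type(struct enetc_si *si)
{
	/* PCAPR_LINK_TYPE set means an internal (pseudo MAC) link */
	if (enetc_port_rd(&si->hw, ENETC4_PCAPR) & PCAPR_LINK_TYPE)
		si->hw_features |= ENETC_SI_F_PPM;
}

static u32 foo_port_mac_rd(struct enetc_si *si, u32 reg)
{
	if (enetc_is_pseudo_mac(si))
		return 0;	/* reads are zero, writes are dropped */

	return enetc_port_rd(&si->hw, reg);
}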
+#define NXP_ENETC_PPM_DEV_ID 0xe110 /**********************Station interface registers************************/ /* Station interface LSO segmentation flag mask register 0/1 */ @@ -115,6 +116,10 @@ #define PMCAPR_HD BIT(8) #define PMCAPR_FP GENMASK(10, 9) +/* Port capability register */ +#define ENETC4_PCAPR 0x4000 +#define PCAPR_LINK_TYPE BIT(4) + /* Port configuration register */ #define ENETC4_PCR 0x4010 #define PCR_HDR_FMT BIT(0) @@ -193,4 +198,29 @@ #define SSP_1G 2 #define PM_IF_MODE_ENA BIT(15) +/**********************ENETC Pseudo MAC port registers************************/ +/* Port pseudo MAC receive octets counter (64-bit) */ +#define ENETC4_PPMROCR 0x5080 + +/* Port pseudo MAC receive unicast frame counter register (64-bit) */ +#define ENETC4_PPMRUFCR 0x5088 + +/* Port pseudo MAC receive multicast frame counter register (64-bit) */ +#define ENETC4_PPMRMFCR 0x5090 + +/* Port pseudo MAC receive broadcast frame counter register (64-bit) */ +#define ENETC4_PPMRBFCR 0x5098 + +/* Port pseudo MAC transmit octets counter (64-bit) */ +#define ENETC4_PPMTOCR 0x50c0 + +/* Port pseudo MAC transmit unicast frame counter register (64-bit) */ +#define ENETC4_PPMTUFCR 0x50c8 + +/* Port pseudo MAC transmit multicast frame counter register (64-bit) */ +#define ENETC4_PPMTMFCR 0x50d0 + +/* Port pseudo MAC transmit broadcast frame counter register (64-bit) */ +#define ENETC4_PPMTBFCR 0x50d8 + #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index 82c443b28b15..498346dd996a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -41,6 +41,16 @@ static void enetc4_get_port_caps(struct enetc_pf *pf) pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE; } +static void enetc4_get_psi_hw_features(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + val = enetc_port_rd(hw, ENETC4_PCAPR); + if (val & PCAPR_LINK_TYPE) + si->hw_features |= ENETC_SI_F_PPM; +} + static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si, const u8 *addr) { @@ -277,6 +287,7 @@ static int enetc4_pf_struct_init(struct enetc_si *si) pf->ops = &enetc4_pf_ops; enetc4_get_port_caps(pf); + enetc4_get_psi_hw_features(si); return 0; } @@ -589,6 +600,9 @@ static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode, struct enetc_si *si = pf->si; u32 val; + if (enetc_is_pseudo_mac(si)) + return; + val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0)); val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA); @@ -1071,6 +1085,7 @@ static void enetc4_pf_remove(struct pci_dev *pdev) static const struct pci_device_id enetc4_pf_id_table[] = { { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) }, + { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) }, { 0, } /* End of table. 
*/ }; MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 71d052de669a..3e222321b937 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -435,6 +435,48 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev, } } +static void enetc_ppm_mac_stats(struct enetc_si *si, + struct ethtool_eth_mac_stats *s) +{ + struct enetc_hw *hw = &si->hw; + u64 rufcr, rmfcr, rbfcr; + u64 tufcr, tmfcr, tbfcr; + + rufcr = enetc_port_rd64(hw, ENETC4_PPMRUFCR); + rmfcr = enetc_port_rd64(hw, ENETC4_PPMRMFCR); + rbfcr = enetc_port_rd64(hw, ENETC4_PPMRBFCR); + + tufcr = enetc_port_rd64(hw, ENETC4_PPMTUFCR); + tmfcr = enetc_port_rd64(hw, ENETC4_PPMTMFCR); + tbfcr = enetc_port_rd64(hw, ENETC4_PPMTBFCR); + + s->FramesTransmittedOK = tufcr + tmfcr + tbfcr; + s->FramesReceivedOK = rufcr + rmfcr + rbfcr; + s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC4_PPMTOCR); + s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC4_PPMROCR); + s->MulticastFramesXmittedOK = tmfcr; + s->BroadcastFramesXmittedOK = tbfcr; + s->MulticastFramesReceivedOK = rmfcr; + s->BroadcastFramesReceivedOK = rbfcr; +} + +static void enetc_ppm_get_eth_mac_stats(struct net_device *ndev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + switch (mac_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_ppm_mac_stats(priv->si, mac_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_mac_stats(ndev, mac_stats); + break; + } +} + static void enetc_get_eth_ctrl_stats(struct net_device *ndev, struct ethtool_eth_ctrl_stats *ctrl_stats) { @@ -894,6 +936,9 @@ static int enetc_get_phc_index_by_pdev(struct enetc_si *si) case ENETC_REV_4_1: devfn = PCI_DEVFN(24, 0); break; + case ENETC_REV_4_3: + devfn = PCI_DEVFN(0, 1); + break; default: return -1; } @@ -1313,6 +1358,25 @@ const struct ethtool_ops enetc_pf_ethtool_ops = { .get_mm_stats = enetc_get_mm_stats, }; +const struct ethtool_ops enetc4_ppm_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats, + .get_rxnfc = enetc4_get_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_rxfh_fields = enetc_get_rxfh_fields, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link_ksettings = enetc_get_link_ksettings, + .set_link_ksettings = enetc_set_link_ksettings, + .get_link = ethtool_op_get_link, +}; + const struct ethtool_ops enetc_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 377c96325814..7b882b8921fe 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -378,6 +378,7 @@ enum enetc_bdr_type {TX, RX}; #define EIPBRR0_REVISION GENMASK(15, 0) #define ENETC_REV_1_0 0x0100 #define ENETC_REV_4_1 0X0401 +#define ENETC_REV_4_3 0x0403 #define ENETC_G_EIPBRR1 0x0bfc #define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c index edf14a95cab7..9c634205e2a7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c @@ -109,7 +109,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | @@ -133,6 +133,9 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->features |= NETIF_F_RXHASH; } + if (!enetc_is_pseudo_mac(si)) + ndev->hw_features |= NETIF_F_LOOPBACK; + /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ if (!is_enetc_rev1(si)) goto end; diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c index bcb8eefeb93c..d7aee3c934d3 100644 --- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c +++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c @@ -47,6 +47,13 @@ #define PCS_PROT_SFI BIT(4) #define PCS_PROT_10G_SXGMII BIT(6) +#define IMX94_EXT_PIN_CONTROL 0x10 +#define MAC2_MAC3_SEL BIT(1) + +#define IMX94_NETC_LINK_CFG(a) (0x4c + (a) * 4) +#define NETC_LINK_CFG_MII_PROT GENMASK(3, 0) +#define NETC_LINK_CFG_IO_VAR GENMASK(19, 16) + /* NETC privileged register block register */ #define PRB_NETCRR 0x100 #define NETCRR_SR BIT(0) @@ -59,6 +66,7 @@ /* NETC integrated endpoint register block register */ #define IERB_EMDIOFAUXR 0x344 #define IERB_T0FAUXR 0x444 +#define IERB_ETBCR(a) (0x300c + 0x100 * (a)) #define IERB_EFAUXR(a) (0x3044 + 0x100 * (a)) #define IERB_VFAUXR(a) (0x4004 + 0x40 * (a)) #define FAUXR_LDID GENMASK(3, 0) @@ -68,6 +76,19 @@ #define IMX95_ENETC1_BUS_DEVFN 0x40 #define IMX95_ENETC2_BUS_DEVFN 0x80 +#define IMX94_ENETC0_BUS_DEVFN 0x100 +#define IMX94_ENETC1_BUS_DEVFN 0x140 +#define IMX94_ENETC2_BUS_DEVFN 0x180 +#define IMX94_TIMER0_BUS_DEVFN 0x1 +#define IMX94_TIMER1_BUS_DEVFN 0x101 +#define IMX94_TIMER2_BUS_DEVFN 0x181 +#define IMX94_ENETC0_LINK 3 +#define IMX94_ENETC1_LINK 4 +#define IMX94_ENETC2_LINK 5 + +#define NETC_ENETC_ID(a) (a) +#define NETC_TIMER_ID(a) (a) + /* Flags for different platforms */ #define NETC_HAS_NETCMIX BIT(0) @@ -192,6 +213,90 @@ static int imx95_netcmix_init(struct platform_device *pdev) return 0; } +static int imx94_enetc_get_link_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse ENETC link number */ + switch (bus_devfn) { + case IMX94_ENETC0_BUS_DEVFN: + return IMX94_ENETC0_LINK; + case IMX94_ENETC1_BUS_DEVFN: + return IMX94_ENETC1_LINK; + case IMX94_ENETC2_BUS_DEVFN: + return IMX94_ENETC2_LINK; + default: + return -EINVAL; + } +} + +static int imx94_link_config(struct netc_blk_ctrl *priv, + struct device_node *np, int link_id) +{ + phy_interface_t interface; + int mii_proto; + u32 val; + + /* The node may be disabled and does not have a 'phy-mode' + * or 'phy-connection-type' property. 
+ */ + if (of_get_phy_mode(np, &interface)) + return 0; + + mii_proto = netc_get_link_mii_protocol(interface); + if (mii_proto < 0) + return mii_proto; + + val = mii_proto & NETC_LINK_CFG_MII_PROT; + if (val == MII_PROT_SERIAL) + val = u32_replace_bits(val, IO_VAR_16FF_16G_SERDES, + NETC_LINK_CFG_IO_VAR); + + netc_reg_write(priv->netcmix, IMX94_NETC_LINK_CFG(link_id), val); + + return 0; +} + +static int imx94_enetc_link_config(struct netc_blk_ctrl *priv, + struct device_node *np) +{ + int link_id = imx94_enetc_get_link_id(np); + + if (link_id < 0) + return link_id; + + return imx94_link_config(priv, np, link_id); +} + +static int imx94_netcmix_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + u32 val; + int err; + + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + err = imx94_enetc_link_config(priv, gchild); + if (err) + return err; + } + } + + /* ENETC 0 and switch port 2 share the same parallel interface. + * Currently, the switch is not supported, so this interface is + * used by ENETC 0 by default. + */ + val = netc_reg_read(priv->netcmix, IMX94_EXT_PIN_CONTROL); + val |= MAC2_MAC3_SEL; + netc_reg_write(priv->netcmix, IMX94_EXT_PIN_CONTROL, val); + + return 0; +} + static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv) { return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK); @@ -247,6 +352,98 @@ static int imx95_ierb_init(struct platform_device *pdev) return 0; } +static int imx94_get_enetc_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse ENETC offset */ + switch (bus_devfn) { + case IMX94_ENETC0_BUS_DEVFN: + return NETC_ENETC_ID(0); + case IMX94_ENETC1_BUS_DEVFN: + return NETC_ENETC_ID(1); + case IMX94_ENETC2_BUS_DEVFN: + return NETC_ENETC_ID(2); + default: + return -EINVAL; + } +} + +static int imx94_get_timer_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse NETC PTP timer ID: timer0 is on bus 0, + * timer1 and timer2 are on bus 1. + */ + switch (bus_devfn) { + case IMX94_TIMER0_BUS_DEVFN: + return NETC_TIMER_ID(0); + case IMX94_TIMER1_BUS_DEVFN: + return NETC_TIMER_ID(1); + case IMX94_TIMER2_BUS_DEVFN: + return NETC_TIMER_ID(2); + default: + return -EINVAL; + } +} + +static int imx94_enetc_update_tid(struct netc_blk_ctrl *priv, + struct device_node *np) +{ + struct device *dev = &priv->pdev->dev; + struct device_node *timer_np; + int eid, tid; + + eid = imx94_get_enetc_id(np); + if (eid < 0) { + dev_err(dev, "Failed to get ENETC ID\n"); + return eid; + } + + timer_np = of_parse_phandle(np, "ptp-timer", 0); + if (!timer_np) { + /* If 'ptp-timer' is not present, timer1 is the default + * timer of all standalone ENETCs, which is on the same PCIe + * bus as these ENETCs.
+ */ + tid = NETC_TIMER_ID(1); + goto end; + } + + tid = imx94_get_timer_id(timer_np); + of_node_put(timer_np); + if (tid < 0) { + dev_err(dev, "Failed to get NETC Timer ID\n"); + return tid; + } + +end: + netc_reg_write(priv->ierb, IERB_ETBCR(eid), tid); + + return 0; +} + +static int imx94_ierb_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + int err; + + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + err = imx94_enetc_update_tid(priv, gchild); + if (err) + return err; + } + } + + return 0; +} + static int netc_ierb_init(struct platform_device *pdev) { struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); @@ -340,8 +537,15 @@ static const struct netc_devinfo imx95_devinfo = { .ierb_init = imx95_ierb_init, }; +static const struct netc_devinfo imx94_devinfo = { + .flags = NETC_HAS_NETCMIX, + .netcmix_init = imx94_netcmix_init, + .ierb_init = imx94_ierb_init, +}; + static const struct of_device_id netc_blk_ctrl_match[] = { { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo }, + { .compatible = "nxp,imx94-netc-blk-ctrl", .data = &imx94_devinfo }, {}, }; MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3222359ac15b..b6fbb84cfb06 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -52,6 +52,7 @@ #include <linux/of_net.h> #include <linux/phy.h> #include <linux/pinctrl/consumer.h> +#include <linux/phy_fixed.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/prefetch.h> @@ -2233,7 +2234,6 @@ static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2282,7 +2282,6 @@ static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2314,7 +2313,6 @@ static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, if (ret) netdev_err(fep->netdev, "MDIO write timeout\n"); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2358,7 +2356,6 @@ static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, netdev_err(fep->netdev, "MDIO write timeout\n"); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2478,11 +2475,8 @@ static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = NULL; - char mdio_bus_id[MII_BUS_ID_SIZE]; - char phy_name[MII_BUS_ID_SIZE + 3]; - int phy_id; - int dev_id = fep->dev_id; + struct phy_device *phy_dev; + int ret; if (fep->phy_node) { phy_dev = of_phy_connect(ndev, fep->phy_node, @@ -2494,30 +2488,28 @@ static int fec_enet_mii_probe(struct net_device *ndev) } } else { /* check for attached phy */ - for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) - continue; - if (dev_id--) - continue; - strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); - break; - } + phy_dev = 
phy_find_first(fep->mii_bus); + if (fep->dev_id && phy_dev) + phy_dev = phy_find_next(fep->mii_bus, phy_dev); - if (phy_id >= PHY_MAX_ADDR) { + if (!phy_dev) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); - phy_id = 0; + phy_dev = fixed_phy_register_100fd(); + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not register fixed PHY\n"); + return PTR_ERR(phy_dev); + } } - snprintf(phy_name, sizeof(phy_name), - PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, - fep->phy_interface); - } + ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link, + fep->phy_interface); + if (ret) { + if (phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); + netdev_err(ndev, "could not attach to PHY\n"); + return ret; + } - if (IS_ERR(phy_dev)) { - netdev_err(ndev, "could not attach to PHY\n"); - return PTR_ERR(phy_dev); } /* mask with MAC supported features */ @@ -2554,7 +2546,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) int err = -ENXIO; u32 mii_speed, holdtime; u32 bus_freq; - int addr; /* * The i.MX28 dual fec interfaces are not equal. @@ -2669,11 +2660,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) of_node_put(node); /* find all the PHY devices on the bus and set mac_managed_pm to true */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phydev = mdiobus_get_phy(fep->mii_bus, addr); - if (phydev) - phydev->mac_managed_pm = true; - } + mdiobus_for_each_phy(fep->mii_bus, phydev) + phydev->mac_managed_pm = true; mii_cnt++; @@ -2841,7 +2829,6 @@ static void fec_enet_get_regs(struct net_device *ndev, buf[off] = readl(&theregs[off]); } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -3618,7 +3605,6 @@ err_enet_mii_probe: err_enet_alloc: fec_enet_clk_enable(ndev, false); clk_enable: - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; @@ -3628,8 +3614,9 @@ static int fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; - phy_stop(ndev->phydev); + phy_stop(phy_dev); if (netif_device_present(ndev)) { napi_disable(&fep->napi); @@ -3637,7 +3624,10 @@ fec_enet_close(struct net_device *ndev) fec_stop(ndev); } - phy_disconnect(ndev->phydev); + phy_disconnect(phy_dev); + + if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_unused(); @@ -3649,7 +3639,6 @@ fec_enet_close(struct net_device *ndev) cpu_latency_qos_remove_request(&fep->pm_qos_req); pinctrl_pm_select_sleep_state(&fep->pdev->dev); - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); fec_enet_free_buffers(ndev); @@ -4618,7 +4607,6 @@ fec_probe(struct platform_device *pdev) INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h index 1250e10d21db..55e705e239f8 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth.h +++ b/drivers/net/ethernet/fungible/funeth/funeth.h @@ -4,7 +4,7 @@ #define _FUNETH_H #include <uapi/linux/if_ether.h> -#include <uapi/linux/net_tstamp.h> +#include <linux/net_tstamp.h> #include <linux/mutex.h> #include <linux/seqlock.h> 
#include <linux/xarray.h> @@ -121,7 +121,7 @@ struct funeth_priv { u8 rx_coal_usec; u8 rx_coal_count; - struct hwtstamp_config hwtstamp_cfg; + struct kernel_hwtstamp_config hwtstamp_cfg; /* cumulative queue stats from earlier queue instances */ u64 tx_packets; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c index ac86179a0a81..792cddac6f1b 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_main.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c @@ -1014,26 +1014,25 @@ static int fun_get_port_attributes(struct net_device *netdev) return 0; } -static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +static int fun_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { const struct funeth_priv *fp = netdev_priv(dev); - return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, - sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0; + *config = fp->hwtstamp_cfg; + return 0; } -static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +static int fun_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct funeth_priv *fp = netdev_priv(dev); - struct hwtstamp_config cfg; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; /* no TX HW timestamps */ - cfg.tx_type = HWTSTAMP_TX_OFF; + config->tx_type = HWTSTAMP_TX_OFF; - switch (cfg.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -1051,26 +1050,14 @@ static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - cfg.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - fp->hwtstamp_cfg = cfg; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; -} - -static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCSHWTSTAMP: - return fun_hwtstamp_set(dev, ifr); - case SIOCGHWTSTAMP: - return fun_hwtstamp_get(dev, ifr); - default: - return -EOPNOTSUPP; - } + fp->hwtstamp_cfg = *config; + return 0; } /* Prepare the queues for XDP. 
*/ @@ -1340,7 +1327,6 @@ static const struct net_device_ops fun_netdev_ops = { .ndo_change_mtu = fun_change_mtu, .ndo_set_mac_address = fun_set_macaddr, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = fun_ioctl, .ndo_uninit = fun_uninit, .ndo_bpf = fun_xdp, .ndo_xdp_xmit = fun_xdp_xmit_frames, @@ -1348,6 +1334,8 @@ static const struct net_device_ops fun_netdev_ops = { .ndo_set_vf_vlan = fun_set_vf_vlan, .ndo_set_vf_rate = fun_set_vf_rate, .ndo_get_vf_config = fun_get_vf_config, + .ndo_hwtstamp_get = fun_hwtstamp_get, + .ndo_hwtstamp_set = fun_hwtstamp_set, }; #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \ diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 4cc6dcbfd367..a33b44c1eb86 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -59,8 +59,6 @@ #define GVE_DEFAULT_RX_BUFFER_SIZE 2048 -#define GVE_MAX_RX_BUFFER_SIZE 4096 - #define GVE_XDP_RX_BUFFER_SIZE_DQO 4096 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 @@ -1169,6 +1167,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv) priv->queue_format == GVE_GQI_QPL_FORMAT; } +static inline bool gve_is_dqo(struct gve_priv *priv) +{ + return priv->queue_format == GVE_DQO_RDA_FORMAT || + priv->queue_format == GVE_DQO_QPL_FORMAT; +} + static inline u32 gve_num_tx_queues(struct gve_priv *priv) { return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues; @@ -1249,9 +1253,12 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg); void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx); void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx); -u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit); bool gve_header_split_supported(const struct gve_priv *priv); -int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); +int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len, + struct netlink_ext_ack *extack, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); +int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); /* rx buffer handling */ int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs); void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 4f33d094a2ef..b72cc0fa2ba2 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -987,6 +987,10 @@ static void gve_enable_supported_features(struct gve_priv *priv, dev_info(&priv->pdev->dev, "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n", priv->max_rx_buffer_size, priv->header_buf_size); + if (gve_is_dqo(priv) && + priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE) + priv->rx_cfg.packet_buffer_size = + priv->max_rx_buffer_size; } /* Read and store ring size ranges given by device */ diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index d0a223250845..52500ae8348e 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev, cmd->rx_pending = priv->rx_desc_cnt; cmd->tx_pending = priv->tx_desc_cnt; + kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size; + if (!gve_header_split_supported(priv)) kernel_cmd->tcp_data_split = 
ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; else if (priv->header_split_enabled) @@ -537,34 +539,6 @@ static void gve_get_ringparam(struct net_device *netdev, kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; } -static int gve_adjust_ring_sizes(struct gve_priv *priv, - u16 new_tx_desc_cnt, - u16 new_rx_desc_cnt) -{ - struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; - struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - int err; - - /* get current queue configuration */ - gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); - - /* copy over the new ring_size from ethtool */ - tx_alloc_cfg.ring_size = new_tx_desc_cnt; - rx_alloc_cfg.ring_size = new_rx_desc_cnt; - - if (netif_running(priv->dev)) { - err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - if (err) - return err; - } - - /* Set new ring_size for the next up */ - priv->tx_desc_cnt = new_tx_desc_cnt; - priv->rx_desc_cnt = new_rx_desc_cnt; - - return 0; -} - static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt, u16 new_rx_desc_cnt) { @@ -584,34 +558,68 @@ static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt return 0; } +static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt, + u16 new_rx_desc_cnt, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +{ + if (new_tx_desc_cnt == priv->tx_desc_cnt && + new_rx_desc_cnt == priv->rx_desc_cnt) + return 0; + + if (!priv->modify_ring_size_enabled) { + dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n"); + return -EOPNOTSUPP; + } + + if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt)) + return -EINVAL; + + tx_alloc_cfg->ring_size = new_tx_desc_cnt; + rx_alloc_cfg->ring_size = new_rx_desc_cnt; + return 0; +} + static int gve_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *cmd, struct kernel_ethtool_ringparam *kernel_cmd, struct netlink_ext_ack *extack) { + struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; + struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; struct gve_priv *priv = netdev_priv(netdev); - u16 new_tx_cnt, new_rx_cnt; int err; - err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split); + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); + + err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack, + &rx_alloc_cfg); if (err) return err; - if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt) - return 0; - - if (!priv->modify_ring_size_enabled) { - dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n"); - return -EOPNOTSUPP; - } - - new_tx_cnt = cmd->tx_pending; - new_rx_cnt = cmd->rx_pending; + err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split, + &rx_alloc_cfg); + if (err) + return err; - if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt)) - return -EINVAL; + err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending, + &tx_alloc_cfg, &rx_alloc_cfg); + if (err) + return err; - return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt); + if (netif_running(priv->dev)) { + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); + if (err) + return err; + } else { + /* Set ring params for the next up */ + priv->rx_cfg.packet_buffer_size = + rx_alloc_cfg.packet_buffer_size; + priv->header_split_enabled = rx_alloc_cfg.enable_header_split; + priv->tx_desc_cnt = tx_alloc_cfg.ring_size; + priv->rx_desc_cnt = rx_alloc_cfg.ring_size; + } + return 0; } static int gve_user_reset(struct net_device 
*netdev, u32 *flags) @@ -946,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev, const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, - .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | + ETHTOOL_RING_USE_RX_BUF_LEN, .get_drvinfo = gve_get_drvinfo, .get_strings = gve_get_strings, .get_sset_count = gve_get_sset_count, diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 1be1b1ef31ee..6fb8fbb38a7d 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1707,18 +1707,28 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return 0; } -static int verify_xdp_configuration(struct net_device *dev) +static int gve_verify_xdp_configuration(struct net_device *dev, + struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(dev); u16 max_xdp_mtu; if (dev->features & NETIF_F_LRO) { - netdev_warn(dev, "XDP is not supported when LRO is on.\n"); + NL_SET_ERR_MSG_MOD(extack, + "XDP is not supported when LRO is on."); return -EOPNOTSUPP; } if (priv->header_split_enabled) { - netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n"); + NL_SET_ERR_MSG_MOD(extack, + "XDP is not supported when header-data split is enabled."); + return -EOPNOTSUPP; + } + + if (priv->rx_cfg.packet_buffer_size != SZ_2K) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "XDP is not supported for Rx buf len %d, only %d supported.", + priv->rx_cfg.packet_buffer_size, SZ_2K); return -EOPNOTSUPP; } @@ -1727,17 +1737,20 @@ static int verify_xdp_configuration(struct net_device *dev) max_xdp_mtu -= GVE_RX_PAD; if (dev->mtu > max_xdp_mtu) { - netdev_warn(dev, "XDP is not supported for mtu %d.\n", - dev->mtu); + NL_SET_ERR_MSG_FMT_MOD(extack, + "XDP is not supported for mtu %d.", + dev->mtu); return -EOPNOTSUPP; } if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { - netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d", - priv->rx_cfg.num_queues, - priv->tx_cfg.num_queues, + netdev_warn(dev, + "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.", + priv->rx_cfg.num_queues, priv->tx_cfg.num_queues, priv->tx_cfg.max_queues); + NL_SET_ERR_MSG_MOD(extack, + "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues"); return -EINVAL; } return 0; @@ -1748,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct gve_priv *priv = netdev_priv(dev); int err; - err = verify_xdp_configuration(dev); + err = gve_verify_xdp_configuration(dev, xdp->extack); if (err) return err; switch (xdp->command) { @@ -2041,14 +2054,6 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) priv->tx_timeo_cnt++; } -u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit) -{ - if (enable_hsplit && priv->max_rx_buffer_size >= 
GVE_MAX_RX_BUFFER_SIZE) - return GVE_MAX_RX_BUFFER_SIZE; - else - return GVE_DEFAULT_RX_BUFFER_SIZE; -} - /* Header split is only supported on DQ RDA queue format. If XDP is enabled, * header split is not allowed. */ @@ -2058,12 +2063,42 @@ bool gve_header_split_supported(const struct gve_priv *priv) priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog; } -int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) +int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len, + struct netlink_ext_ack *extack, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +{ + u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size; + + if (rx_buf_len == old_rx_buf_len) + return 0; + + /* device options may not always contain support for 4K buffers */ + if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) { + NL_SET_ERR_MSG_MOD(extack, + "Modifying Rx buf len is not supported"); + return -EOPNOTSUPP; + } + + if (priv->xdp_prog && rx_buf_len != SZ_2K) { + NL_SET_ERR_MSG_MOD(extack, + "Rx buf len can only be 2048 when XDP is on"); + return -EINVAL; + } + + if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) { + NL_SET_ERR_MSG_MOD(extack, + "Rx buf len can only be 2048 or 4096"); + return -EINVAL; + } + rx_alloc_cfg->packet_buffer_size = rx_buf_len; + + return 0; +} + +int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { - struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; - struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; bool enable_hdr_split; - int err = 0; if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) return 0; @@ -2081,14 +2116,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) if (enable_hdr_split == priv->header_split_enabled) return 0; - gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); - - rx_alloc_cfg.enable_header_split = enable_hdr_split; - rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split); + rx_alloc_cfg->enable_header_split = enable_hdr_split; - if (netif_running(priv->dev)) - err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - return err; + return 0; } static int gve_set_features(struct net_device *netdev, diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 0b92a2e5e986..068da2fd1fea 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -472,6 +472,22 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } +static void hbg_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + rtnl_lock(); + if (netif_running(netdev)) + dev_close(netdev); + rtnl_unlock(); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static const struct pci_device_id hbg_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, 0x3730), 0}, { } @@ -482,6 +498,7 @@ static struct pci_driver hbg_driver = { .name = "hibmcge", .id_table = hbg_pci_tbl, .probe = hbg_probe, + .shutdown = hbg_shutdown, }; static int __init hbg_module_init(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 3b548f71fa8a..d7c3df1958f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -804,6 +804,11 @@ struct hnae3_ae_ops { int (*dbg_get_read_func)(struct hnae3_handle *handle, enum 
hnae3_dbg_cmd cmd, read_func *func); + int (*hwtstamp_get)(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config); + int (*hwtstamp_set)(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bfa5568baa92..7a0654e2d3dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2419,6 +2419,35 @@ static int hns3_nic_do_ioctl(struct net_device *netdev, return h->ae_algo->ops->do_ioctl(h, ifr, cmd); } +static int hns3_nic_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_get) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_get(h, config); +} + +static int hns3_nic_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_set) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_set(h, config, extack); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -3048,6 +3077,8 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_rate = hns3_nic_set_vf_rate, .ndo_set_vf_mac = hns3_nic_set_vf_mac, .ndo_select_queue = hns3_nic_select_queue, + .ndo_hwtstamp_get = hns3_nic_hwtstamp_get, + .ndo_hwtstamp_set = hns3_nic_hwtstamp_set, }; bool hns3_is_phys_func(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 782bb48c9f3d..cf8abbe01840 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9444,15 +9444,8 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - switch (cmd) { - case SIOCGHWTSTAMP: - return hclge_ptp_get_cfg(hdev, ifr); - case SIOCSHWTSTAMP: - return hclge_ptp_set_cfg(hdev, ifr); - default: - if (!hdev->hw.mac.phydev) - return hclge_mii_ioctl(hdev, ifr, cmd); - } + if (!hdev->hw.mac.phydev) + return hclge_mii_ioctl(hdev, ifr, cmd); return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); } @@ -12900,6 +12893,8 @@ static const struct hnae3_ae_ops hclge_ops = { .get_dscp_prio = hclge_get_dscp_prio, .get_wol = hclge_get_wol, .set_wol = hclge_set_wol, + .hwtstamp_get = hclge_ptp_get_cfg, + .hwtstamp_set = hclge_ptp_set_cfg, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 4bd52eab3914..0081c5281455 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -204,13 +204,17 @@ static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +int hclge_ptp_get_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + if 
(!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) return -EOPNOTSUPP; - return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg, - sizeof(struct hwtstamp_config)) ? -EFAULT : 0; + *config = hdev->ptp->ts_cfg; + return 0; } static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en) @@ -269,7 +273,7 @@ static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg) return ret; } -static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, +static int hclge_ptp_set_tx_mode(struct kernel_hwtstamp_config *cfg, unsigned long *flags, u32 *ptp_cfg) { switch (cfg->tx_type) { @@ -287,7 +291,7 @@ static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, return 0; } -static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, +static int hclge_ptp_set_rx_mode(struct kernel_hwtstamp_config *cfg, unsigned long *flags, u32 *ptp_cfg) { int rx_filter = cfg->rx_filter; @@ -332,7 +336,7 @@ static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, } static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, - struct hwtstamp_config *cfg) + struct kernel_hwtstamp_config *cfg) { unsigned long flags = hdev->ptp->flags; u32 ptp_cfg = 0; @@ -359,9 +363,12 @@ static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, return 0; } -int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +int hclge_ptp_set_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config cfg; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; int ret; if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { @@ -369,16 +376,13 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) return -EOPNOTSUPP; } - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - ret = hclge_ptp_set_ts_mode(hdev, &cfg); + ret = hclge_ptp_set_ts_mode(hdev, config); if (ret) return ret; - hdev->ptp->ts_cfg = cfg; + hdev->ptp->ts_cfg = *config; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } int hclge_ptp_get_ts_info(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index 61faddcc3dd0..0162fa5ac146 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -62,7 +62,7 @@ struct hclge_ptp { unsigned long flags; void __iomem *io_base; struct ptp_clock_info info; - struct hwtstamp_config ts_cfg; + struct kernel_hwtstamp_config ts_cfg; spinlock_t lock; /* protects ptp registers */ u32 ptp_cfg; u32 last_tx_seqid; @@ -133,8 +133,11 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); -int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); -int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_get_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config); +int hclge_ptp_set_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); int hclge_ptp_init(struct hclge_dev *hdev); void hclge_ptp_uninit(struct hclge_dev *hdev); int hclge_ptp_get_ts_info(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c index 0fa3c7900225..bbf22811a029 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -304,7 +304,7 @@ static int hinic3_open_channel(struct net_device *netdev) err = hinic3_configure(netdev); if (err) { - netdev_err(netdev, "Failed to init txrxq irq\n"); + netdev_err(netdev, "Failed to configure device resources\n"); goto err_uninit_qps_irq; } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 122ee23497e6..288fa8ce53af 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -296,6 +296,7 @@ config ICE depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB + select LIBETH_XDP select LIBIE select LIBIE_ADMINQ select LIBIE_FWLOG if DEBUG_FS diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 018e61aea787..aa08f397988e 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -461,6 +461,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) #define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) #define FLAG2_ENABLE_S0IX_FLOWS BIT(15) +#define FLAG2_DISABLE_K1 BIT(16) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 8e40bb50a01e..cee57a2149ab 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -26,6 +26,8 @@ struct e1000_stats { static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = { #define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0) "s0ix-enabled", +#define E1000E_PRIV_FLAGS_DISABLE_K1 BIT(1) + "disable-k1", }; #define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings) @@ -2301,26 +2303,59 @@ static u32 e1000e_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & 
FLAG2_ENABLE_S0IX_FLOWS) priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED; + if (adapter->flags2 & FLAG2_DISABLE_K1) + priv_flags |= E1000E_PRIV_FLAGS_DISABLE_K1; + return priv_flags; } static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; unsigned int flags2 = adapter->flags2; + unsigned int changed; - flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS; - if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) { - struct e1000_hw *hw = &adapter->hw; + flags2 &= ~(FLAG2_ENABLE_S0IX_FLOWS | FLAG2_DISABLE_K1); - if (hw->mac.type < e1000_pch_cnp) + if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) { + if (hw->mac.type < e1000_pch_cnp) { + e_err("S0ix is not supported on this device\n"); return -EINVAL; + } + flags2 |= FLAG2_ENABLE_S0IX_FLOWS; } - if (flags2 != adapter->flags2) + if (priv_flags & E1000E_PRIV_FLAGS_DISABLE_K1) { + if (hw->mac.type < e1000_ich8lan) { + e_err("Disabling K1 is not supported on this device\n"); + return -EINVAL; + } + + flags2 |= FLAG2_DISABLE_K1; + } + + changed = adapter->flags2 ^ flags2; + if (changed) adapter->flags2 = flags2; + if (changed & FLAG2_DISABLE_K1) { + /* reset the hardware to apply the changes */ + while (test_and_set_bit(__E1000_RESETTING, + &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + } + return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index df4e7d781cb1..0ff8688ac3b8 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -286,21 +286,26 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) } /** - * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to - * align to MTP and later platform requirements. + * e1000_reconfigure_k1_params - reconfigure Kumeran K1 parameters. * @hw: pointer to the HW structure * + * By default K1 is enabled after MAC reset, so this function only + * disables it. + * * Context: PHY semaphore must be held by caller. 
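An illustrative usage note, not part of the patch: the new flag is driven through the standard ethtool private-flags interface, so with an assumed interface name of eth0 the reset path above would be exercised by:

    ethtool --set-priv-flags eth0 disable-k1 on
    ethtool --show-priv-flags eth0

Per the probe hunk further below, MTP and later platforms default the flag to on.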
* Return: 0 on success, negative on failure */ -static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) +static s32 e1000_reconfigure_k1_params(struct e1000_hw *hw) { u16 phy_timeout; u32 fextnvm12; s32 ret_val; - if (hw->mac.type < e1000_pch_mtp) + if (hw->mac.type < e1000_pch_mtp) { + if (hw->adapter->flags2 & FLAG2_DISABLE_K1) + return e1000_configure_k1_ich8lan(hw, false); return 0; + } /* Change Kumeran K1 power down state from P0s to P1 */ fextnvm12 = er32(FEXTNVM12); @@ -310,6 +315,8 @@ static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) /* Wait for the interface to settle */ usleep_range(1000, 1100); + if (hw->adapter->flags2 & FLAG2_DISABLE_K1) + return e1000_configure_k1_ich8lan(hw, false); /* Change K1 exit timeout */ ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG, @@ -373,8 +380,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) /* At this point the PHY might be inaccessible so don't * propagate the failure */ - if (e1000_reconfigure_k1_exit_timeout(hw)) - e_dbg("Failed to reconfigure K1 exit timeout\n"); + if (e1000_reconfigure_k1_params(hw)) + e_dbg("Failed to reconfigure K1 parameters\n"); fallthrough; case e1000_pch_lpt: @@ -473,10 +480,10 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (hw->mac.type >= e1000_pch_mtp) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) { - e_err("Failed to reconfigure K1 exit timeout\n"); + e_err("Failed to reconfigure K1 parameters\n"); goto out; } - ret_val = e1000_reconfigure_k1_exit_timeout(hw); + ret_val = e1000_reconfigure_k1_params(hw); hw->phy.ops.release(hw); } } @@ -4948,17 +4955,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) u16 i; e1000_initialize_hw_bits_ich8lan(hw); - if (hw->mac.type >= e1000_pch_mtp) { - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_reconfigure_k1_exit_timeout(hw); - hw->phy.ops.release(hw); - if (ret_val) { - e_dbg("Error failed to reconfigure K1 exit timeout\n"); - return ret_val; - } + ret_val = e1000_reconfigure_k1_params(hw); + hw->phy.ops.release(hw); + if (ret_val) { + e_dbg("Failed to reconfigure K1 parameters\n"); + return ret_val; } /* Initialize identification LED */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 201322dac233..116f3c92b5bc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7675,6 +7675,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* init PTP hardware clock */ e1000e_ptp_init(adapter); + if (hw->mac.type >= e1000_pch_mtp) + adapter->flags2 |= FLAG2_DISABLE_K1; + /* reset the hardware with the new settings */ e1000e_reset(adapter); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 801a57a925da..d2d03db2acec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -574,6 +574,10 @@ struct i40e_pf { struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; + /* If set to non-zero, the device uses this value + * as the maximum number of MAC filters per VF.
+ */ + u32 max_mac_per_vf; u32 arq_overflows; /* Not fatal, possibly indicative of problems */ struct ratelimit_state mdd_message_rate_limit; /* DCBx/DCBNL capability for PF that indicates diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c index cc4e9e2addb7..bc205e3077c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c @@ -5,6 +5,41 @@ #include "i40e.h" #include "i40e_devlink.h" +static int i40e_max_mac_per_vf_set(struct devlink *devlink, + u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct i40e_pf *pf = devlink_priv(devlink); + + if (pf->num_alloc_vfs > 0) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change max_mac_per_vf while SR-IOV is enabled"); + return -EBUSY; + } + + pf->max_mac_per_vf = ctx->val.vu32; + return 0; +} + +static int i40e_max_mac_per_vf_get(struct devlink *devlink, + u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct i40e_pf *pf = devlink_priv(devlink); + + ctx->val.vu32 = pf->max_mac_per_vf; + return 0; +} + +static const struct devlink_param i40e_dl_params[] = { + DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + i40e_max_mac_per_vf_get, + i40e_max_mac_per_vf_set, + NULL), +}; + static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len) { u8 dsn[8]; @@ -165,7 +200,17 @@ void i40e_free_pf(struct i40e_pf *pf) **/ void i40e_devlink_register(struct i40e_pf *pf) { - devlink_register(priv_to_devlink(pf)); + struct devlink *dl = priv_to_devlink(pf); + struct device *dev = &pf->pdev->dev; + int err; + + err = devlink_params_register(dl, i40e_dl_params, + ARRAY_SIZE(i40e_dl_params)); + if (err) + dev_err(dev, + "devlink params register failed with error %d\n", err); + + devlink_register(dl); } /** @@ -176,7 +222,11 @@ void i40e_devlink_register(struct i40e_pf *pf) **/ void i40e_devlink_unregister(struct i40e_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); + struct devlink *dl = priv_to_devlink(pf); + + devlink_unregister(dl); + devlink_params_unregister(dl, i40e_dl_params, + ARRAY_SIZE(i40e_dl_params)); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 081a4526a2f0..9d91a382612d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2935,33 +2935,48 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, if (!f) ++mac_add_cnt; } - - /* If this VF is not privileged, then we can't add more than a limited - * number of addresses. + /* Determine the maximum number of MAC addresses this VF may use. + * + * - For untrusted VFs: use a fixed small limit. + * + * - For trusted VFs: the limit is calculated by dividing the total MAC + * filter pool across all VFs/ports. - * - * If this VF is trusted, it can use more resources than untrusted. - * However to ensure that every trusted VF has appropriate number of - * resources, divide whole pool of resources per port and then across - * all VFs. + * - The user can override this via the devlink param "max_mac_per_vf". + * If set, its value is used as a strict cap for both trusted and + * untrusted VFs. + * Note: + * even when overridden, this is a theoretical maximum; hardware + * may reject additional MACs if the absolute HW limit is reached.
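An illustrative usage note, not part of the patch, with a hypothetical PCI address: the runtime parameter registered above follows the standard devlink syntax,

    devlink dev param show pci/0000:02:00.0 name max_mac_per_vf
    devlink dev param set pci/0000:02:00.0 name max_mac_per_vf value 64 cmode runtime

and, per the set callback, it must be configured before SR-IOV VFs are allocated, since changes are rejected with -EBUSY afterwards.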
*/ if (!vf_trusted) mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; else mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); + if (pf->max_mac_per_vf > 0) + mac_add_max = pf->max_mac_per_vf; + /* VF can replace all its filters in one step, in this case mac_add_max * will be added as active and another mac_add_max will be in * a to-be-removed state. Account for that. */ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { + if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n", + mac_add_max); + return -EPERM; + } if (!vf_trusted) { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); return -EPERM; } else { dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); + "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n", + mac_add_max); return -EPERM; } } @@ -4788,6 +4803,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) unsigned long q_map; struct i40e_vf *vf; int abs_vf_id; + int old_link; int ret = 0; int tmp; @@ -4806,6 +4822,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* skip VF link state change if requested state is already set */ + if (!vf->link_forced) + old_link = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + old_link = IFLA_VF_LINK_STATE_ENABLE; + else + old_link = IFLA_VF_LINK_STATE_DISABLE; + + if (link == old_link) + goto error_out; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c index a9e1da35e248..4d12dfe1b481 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c @@ -91,6 +91,55 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) } /** + * iavf_fill_adv_rss_gtp_hdr - Fill GTP-related RSS protocol headers + * @proto_hdrs: pointer to the virtchnl protocol headers structure to populate + * @packet_hdrs: bitmask of packet header types to configure + * @hash_flds: RSS hash field configuration + * + * This function populates the virtchnl protocol header structure with + * appropriate GTP-related header types based on the specified packet_hdrs. + * It supports GTPC, GTPU with extension headers, and uplink/downlink PDU + * types. For certain GTPU types, it also appends an IPv4 header to enable + * hashing on the destination IP address. + * + * Return: 0 on success or -EOPNOTSUPP if the packet_hdrs value is unsupported. 
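An illustrative usage note, not part of the patch: the early return added to i40e_ndo_set_vf_link_state() above makes a repeated request through the usual iproute2 path, e.g.

    ip link set dev eth0 vf 0 state enable

a no-op instead of re-sending a VIRTCHNL link event to an already-enabled VF.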
+ */ +static int +iavf_fill_adv_rss_gtp_hdr(struct virtchnl_proto_hdrs *proto_hdrs, + u32 packet_hdrs, u64 hash_flds) +{ + struct virtchnl_proto_hdr *hdr; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + + switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID: + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPC); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP); + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN); + fallthrough; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP: + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/** * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message * @rss_cfg: the virtchnl message to be filled with RSS configuration setting * @packet_hdrs: the RSS configuration protocol header types @@ -103,6 +152,8 @@ int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, u32 packet_hdrs, u64 hash_flds, bool symm) { + const u32 packet_l3_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3; + const u32 packet_l4_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4; struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; struct virtchnl_proto_hdr *hdr; @@ -113,31 +164,41 @@ iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, proto_hdrs->tunnel_level = 0; /* always outer layer */ - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) { - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: - iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: - iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); - break; - default: - return -EINVAL; + if (packet_l3_hdrs) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_l3_hdrs) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: + iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: + iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } } - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) { - case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: - iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: - iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: - iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); - break; - default: - return -EINVAL; + if (packet_l4_hdrs) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_l4_hdrs) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: + iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: + iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: + iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } + } + + if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + if (iavf_fill_adv_rss_gtp_hdr(proto_hdrs, packet_hdrs, hash_flds)) + return -EINVAL; } return 0; @@ -186,6 +247,8 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, 
struct iavf_adv_rss *rss, proto = "UDP"; else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) proto = "SCTP"; + else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) + proto = "GTP"; else return; @@ -211,6 +274,16 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) strcat(hash_opt, "dst port,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPC_TEID) + strcat(hash_opt, "gtp-c,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID) + strcat(hash_opt, "gtp-u ip,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID) + strcat(hash_opt, "gtp-u ext,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID) + strcat(hash_opt, "gtp-u ul,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID) + strcat(hash_opt, "gtp-u dl,"); if (!action) action = ""; diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h index e31eb2afebea..74cc9e0d528c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h @@ -22,6 +22,12 @@ enum iavf_adv_rss_flow_seg_hdr { IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004, IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008, IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC = 0x00000400, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP = 0x00001000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH = 0x00002000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP = 0x00008000, }; #define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \ @@ -33,6 +39,14 @@ enum iavf_adv_rss_flow_seg_hdr { IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) +#define IAVF_ADV_RSS_FLOW_SEG_HDR_GTP \ + (IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP) + enum iavf_adv_rss_flow_field { /* L3 */ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA, @@ -46,6 +60,17 @@ enum iavf_adv_rss_flow_field { IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT, IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT, IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT, + /* GTPC_TEID */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID, + /* GTPU_IP */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID, + /* GTPU_EH */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID, + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_QFI, + /* GTPU_UP */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID, + /* GTPU_DWN */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID, /* The total number of enums must not exceed 64 */ IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX @@ -72,6 +97,12 @@ enum iavf_adv_rss_flow_field { BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT) #define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT) +#define IAVF_ADV_RSS_HASH_FLD_GTPC_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID) /* bookkeeping of advanced RSS configuration */ struct iavf_adv_rss { diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c 
b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 05d72be3fe80..a3f8ced23266 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1336,6 +1336,56 @@ static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd) hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; break; + case GTPU_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPC_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPC_TEID_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_EH_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_UL_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_DL_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPC_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPC_TEID_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_EH_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_UL_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_DL_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; default: break; } @@ -1353,6 +1403,12 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: + case GTPU_V4_FLOW: + case GTPC_V4_FLOW: + case GTPC_TEID_V4_FLOW: + case GTPU_EH_V4_FLOW: + case GTPU_UL_V4_FLOW: + case GTPU_DL_V4_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; if (cmd->data & RXH_IP_DST) @@ -1361,6 +1417,12 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: + case GTPU_V6_FLOW: + case GTPC_V6_FLOW: + case GTPC_TEID_V6_FLOW: + case GTPU_EH_V6_FLOW: + case GTPU_UL_V6_FLOW: + case GTPU_DL_V6_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; if (cmd->data & RXH_IP_DST) @@ -1382,6 +1444,7 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) break; case UDP_V4_FLOW: case UDP_V6_FLOW: + case GTPC_V4_FLOW: if (cmd->data & RXH_L4_B_0_1) hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; if (cmd->data & RXH_L4_B_2_3) @@ -1398,6 +1461,32 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) break; } } + if (cmd->data & RXH_GTP_TEID) { + switch (cmd->flow_type) { + case GTPC_TEID_V4_FLOW: + case GTPC_TEID_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID; + break; + case GTPU_V4_FLOW: + case GTPU_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID; + break; + case GTPU_EH_V4_FLOW: + case GTPU_EH_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID; + break; + case GTPU_UL_V4_FLOW: + case GTPU_UL_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID; + break; + case GTPU_DL_V4_FLOW: + case GTPU_DL_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID; + break; + 
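An illustrative usage note, not part of the patch: the new GTP flow types are selected through ethtool's receive hashing interface. Exact flow-type tokens and hash-field characters depend on the ethtool version, so the following is an assumption based on recent ethtool releases:

    ethtool -N eth0 rx-flow-hash gtpu4 sd     # hash GTP-U over IPv4 on IP addresses
    ethtool -n eth0 rx-flow-hash gtpu4        # query the resulting field set

Requesting TEID hashing corresponds to the RXH_GTP_TEID handling in the parsing code above.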
default: + break; + } + } return hfld; } diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index fb2de521731a..938914abbe06 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -459,6 +459,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf) rtnl_lock(); ice_vsi_decfg(ice_get_main_vsi(pf)); rtnl_unlock(); + ice_deinit_pf(pf); ice_deinit_dev(pf); } @@ -1231,11 +1232,13 @@ static void ice_set_min_max_msix(struct ice_pf *pf) static int ice_devlink_reinit_up(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct device *dev = ice_pf_to_dev(pf); + bool need_dev_deinit = false; int err; err = ice_init_hw(&pf->hw); if (err) { - dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err); + dev_err(dev, "ice_init_hw failed: %d\n", err); return err; } @@ -1246,13 +1249,19 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) if (err) goto unroll_hw_init; + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); + goto unroll_dev_init; + } + vsi->flags = ICE_VSI_FLAG_INIT; rtnl_lock(); err = ice_vsi_cfg(vsi); rtnl_unlock(); if (err) - goto err_vsi_cfg; + goto unroll_pf_init; /* No need to take devl_lock, it's already taken by devlink API */ err = ice_load(pf); @@ -1265,10 +1274,14 @@ err_load: rtnl_lock(); ice_vsi_decfg(vsi); rtnl_unlock(); -err_vsi_cfg: - ice_deinit_dev(pf); +unroll_pf_init: + ice_deinit_pf(pf); +unroll_dev_init: + need_dev_deinit = true; unroll_hw_init: ice_deinit_hw(&pf->hw); + if (need_dev_deinit) + ice_deinit_dev(pf); return err; } diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 22b8323ff0d0..147aaee192a7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -351,6 +351,7 @@ struct ice_vsi { u16 num_q_vectors; /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; + bool hsplit:1; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -374,6 +375,8 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; + u16 max_frame; + struct ice_aqc_vsi_props info; /* VSI properties */ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ @@ -509,7 +512,6 @@ enum ice_pf_flags { ICE_FLAG_MOD_POWER_UNSUPPORTED, ICE_FLAG_PHY_FW_LOAD_FAILED, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ - ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, ICE_FLAG_VF_VLAN_PRUNING, @@ -1029,11 +1031,15 @@ int ice_open(struct net_device *netdev); int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); +void ice_start_service_task(struct ice_pf *pf); int ice_load(struct ice_pf *pf); void ice_unload(struct ice_pf *pf); void ice_adv_lnk_speed_maps_init(void); +void ice_init_dev_hw(struct ice_pf *pf); int ice_init_dev(struct ice_pf *pf); void ice_deinit_dev(struct ice_pf *pf); +int ice_init_pf(struct ice_pf *pf); +void ice_deinit_pf(struct ice_pf *pf); int ice_change_mtu(struct net_device *netdev, int new_mtu); void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue); int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 2d35a278c555..eadb1e3d12b3 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include <net/xdp_sock_drv.h> +#include <linux/net/intel/libie/rx.h> #include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" @@ -462,19 +463,6 @@ u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - return 0; -} - -/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -536,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) else rlan_ctx.l2tsel = 1; - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + if (ring->hdr_pp) { + rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; + + /* + * If the frame is TCP/UDP/SCTP, it will be split by the + * payload. + * If not, but it's an IPv4/IPv6 frame, it will be split by + * the IP header. + * If not IP, it will be split by the Ethernet header. + * + * In any case, the header buffer will never be left empty. + */ + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 | + ICE_RLAN_RX_HSPLIT_0_SPLIT_IP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; + } else { + rlan_ctx.hbuf = 0; + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + } + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; /* This controls whether VLAN is stripped from inner headers @@ -549,7 +558,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u32, ring->max_frame, + rlan_ctx.rxmax = min_t(u32, vsi->max_frame, ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ @@ -586,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) if (vsi->type == ICE_VSI_VF) return 0; - /* configure Rx buffer alignment */ - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - ice_clear_ring_build_skb_ena(ring); - else - ice_set_ring_build_skb_ena(ring); - - ring->rx_offset = ice_rx_offset(ring); - /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); @@ -601,36 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } -static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +static int ice_rxq_pp_create(struct ice_rx_ring *rq) { - void *ctx_ptr = &ring->pkt_ctx; - struct xsk_cb_desc desc = {}; - - XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); - desc.src = &ctx_ptr; - desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - - sizeof(struct xdp_buff); - desc.bytes = sizeof(ctx_ptr); - xsk_pool_fill_cb(ring->xsk_pool, &desc); -} + struct libeth_fq fq = { + .count = rq->count, + .nid = NUMA_NO_NODE, + .hsplit = rq->vsi->hsplit, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + .buf_len = LIBIE_MAX_RX_BUF_LEN, + }; + int err; -/** - * ice_get_frame_sz - calculate xdp_buff::frame_sz - * @rx_ring: the ring being configured - * - * Return frame size based on underlying PAGE_SIZE - */ -static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) -{ - unsigned int frame_sz; + err = 
libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) + return err; + + rq->pp = fq.pp; + rq->rx_fqes = fq.fqes; + rq->truesize = fq.truesize; + rq->rx_buf_len = fq.buf_len; -#if (PAGE_SIZE >= 8192) - frame_sz = rx_ring->rx_buf_len; -#else - frame_sz = ice_rx_pg_size(rx_ring) / 2; -#endif + if (!fq.hsplit) + return 0; + + fq = (struct libeth_fq){ + .count = rq->count, + .type = LIBETH_FQE_HDR, + .nid = NUMA_NO_NODE, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + }; - return frame_sz; + err = libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) + goto destroy; + + rq->hdr_pp = fq.pp; + rq->hdr_fqes = fq.fqes; + rq->hdr_truesize = fq.truesize; + rq->rx_hdr_len = fq.buf_len; + + return 0; + +destroy: + ice_rxq_pp_destroy(rq); + + return err; } /** @@ -642,7 +658,8 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); - u32 num_bufs = ICE_RX_DESC_UNUSED(ring); + u32 num_bufs = ICE_DESC_UNUSED(ring); + u32 rx_buf_len; int err; if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { @@ -656,15 +673,19 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } ice_rx_xsk_pool(ring); + err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool); + if (err) + return err; + if (ring->xsk_pool) { xdp_rxq_info_unreg(&ring->xdp_rxq); - ring->rx_buf_len = + rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, - ring->rx_buf_len); + rx_buf_len); if (err) return err; err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -673,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); - ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { + err = ice_rxq_pp_create(ring); + if (err) + return err; + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, ring->rx_buf_len); if (err) - return err; + goto err_destroy_fq; } - - err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, - NULL); - if (err) - return err; + xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, + ring->pp); } } - xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq); ring->xdp.data = NULL; - ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", ring->q_index, err); - return err; + goto err_destroy_fq; } if (ring->xsk_pool) { @@ -730,9 +748,17 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (ring->vsi->type == ICE_VSI_CTRL) ice_init_ctrl_rx_descs(ring, num_bufs); else - ice_alloc_rx_bufs(ring, num_bufs); + err = ice_alloc_rx_bufs(ring, num_bufs); + + if (err) + goto err_destroy_fq; return 0; + +err_destroy_fq: + ice_rxq_pp_destroy(ring); + + return err; } int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) @@ -753,18 +779,10 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) */ static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring) { - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - ring->max_frame = ICE_MAX_FRAME_LEGACY_RX; - ring->rx_buf_len = ICE_RXBUF_1664; -#if (PAGE_SIZE < 8192) - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { - 
ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; -#endif + if (!vsi->netdev) { + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; } else { - ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - ring->rx_buf_len = ICE_RXBUF_3072; + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; } } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2532b6f82e97..046bc9c65c51 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1161,6 +1161,9 @@ int ice_init_hw(struct ice_hw *hw) status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; + + ice_init_dev_hw(hw->back); + mutex_init(&hw->tnl_lock); ice_init_chk_recipe_reuse_support(hw); @@ -3389,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E825C_SGMII: return true; default: return false; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index dc131779d426..a1d9abee97e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -10,6 +10,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" #include <net/dcbnl.h> +#include <net/libeth/rx.h> struct ice_stats { char stat_string[ETH_GSTRING_LEN]; @@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_FLAG_VF_TRUE_PROMISC_ENA), ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING), - ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -794,8 +794,7 @@ static int ice_get_extended_regs(struct net_device *netdev, void *p) static void ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; u32 *regs_buf = (u32 *)p; unsigned int i; @@ -810,8 +809,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) static u32 ice_get_msglevel(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); #ifndef CONFIG_DYNAMIC_DEBUG if (pf->hw.debug_mask) @@ -824,8 +822,7 @@ static u32 ice_get_msglevel(struct net_device *netdev) static void ice_set_msglevel(struct net_device *netdev, u32 data) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); #ifndef CONFIG_DYNAMIC_DEBUG if (ICE_DBG_USER & data) @@ -840,16 +837,14 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data) static void ice_get_link_ext_stats(struct net_device *netdev, struct ethtool_link_ext_stats *stats) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); stats->link_down_events = pf->link_down_events; } static int ice_get_eeprom_len(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); return (int)pf->hw.flash.flash_size; } @@ -858,9 +853,7 @@ static int ice_get_eeprom(struct net_device *netdev, 
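/* An illustrative sketch, not part of the patch: the repeated
 * netdev_priv()->vsi->back chains in this file collapse into the
 * ice_netdev_to_pf() helper, which is presumably equivalent to:
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}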
struct ethtool_eeprom *eeprom, u8 *bytes) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; struct device *dev; int ret; @@ -959,8 +952,7 @@ static u64 ice_link_test(struct net_device *netdev) */ static u64 ice_eeprom_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); netdev_info(netdev, "EEPROM test\n"); return !!(ice_nvm_validate_checksum(&pf->hw)); @@ -1239,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size) */ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; int valid_frames, i; + struct page *page; u8 *received_buf; valid_frames = 0; @@ -1255,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; - rx_buf = &rx_ring->rx_buf[i]; - received_buf = page_address(rx_buf->page) + rx_buf->page_offset; + rx_buf = &rx_ring->rx_fqes[i]; + page = __netmem_to_page(rx_buf->netmem); + received_buf = page_address(page) + rx_buf->offset + + page->pp->p.offset; if (ice_lbtest_check_frame(received_buf)) valid_frames++; @@ -1274,9 +1269,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) */ static u64 ice_loopback_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *orig_vsi = np->vsi, *test_vsi; - struct ice_pf *pf = orig_vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *test_vsi; u8 *tx_frame __free(kfree) = NULL; u8 broadcast[ETH_ALEN], ret = 0; int num_frames, valid_frames; @@ -1365,8 +1359,7 @@ lbtest_vsi_close: */ static u64 ice_intr_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); u16 swic_old = pf->sw_int_count; netdev_info(netdev, "interrupt test\n"); @@ -1394,9 +1387,8 @@ static void ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { - struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = ice_netdev_to_pf(netdev); bool if_running = netif_running(netdev); - struct ice_pf *pf = np->vsi->back; struct device *dev; dev = ice_pf_to_dev(pf); @@ -1720,9 +1712,7 @@ static int ice_nway_reset(struct net_device *netdev) */ static u32 ice_get_priv_flags(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); u32 i, ret_flags = 0; for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { @@ -1869,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ice_nway_reset(netdev); } } - if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { - /* down and up VSI so that changes of Rx cfg are reflected. */ - ice_down_up(vsi); - } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported */ @@ -3165,6 +3151,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, ring->rx_jumbo_max_pending = 0; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; + + kernel_ring->tcp_data_split = vsi->hsplit ? 
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED : + ETHTOOL_TCP_DATA_SPLIT_DISABLED; } static int @@ -3181,6 +3171,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, int i, timeout = 50, err = 0; struct ice_hw *hw = &pf->hw; u16 new_rx_cnt, new_tx_cnt; + bool hsplit; if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || ring->tx_pending < ICE_MIN_NUM_DESC || @@ -3206,9 +3197,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", new_rx_cnt); + hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && - new_rx_cnt == vsi->rx_rings[0]->count) { + new_rx_cnt == vsi->rx_rings[0]->count && + hsplit == vsi->hsplit) { netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; } @@ -3238,6 +3232,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, vsi->xdp_rings[i]->count = new_tx_cnt; vsi->num_tx_desc = (u16)new_tx_cnt; vsi->num_rx_desc = (u16)new_rx_cnt; + vsi->hsplit = hsplit; + netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); goto done; } @@ -3321,7 +3317,8 @@ process_rx: rx_rings[i].count = new_rx_cnt; rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; rx_rings[i].desc = NULL; - rx_rings[i].rx_buf = NULL; + rx_rings[i].xdp_buf = NULL; + /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ @@ -3330,10 +3327,6 @@ process_rx: err = ice_setup_rx_ring(&rx_rings[i]); if (err) goto rx_unwind; - - /* allocate Rx buffers */ - err = ice_alloc_rx_bufs(&rx_rings[i], - ICE_RX_DESC_UNUSED(&rx_rings[i])); rx_unwind: if (err) { while (i) { @@ -3347,6 +3340,8 @@ rx_unwind: } process_link: + vsi->hsplit = hsplit; + /* Bring interface down, copy in the new ring info, then restore the * interface. 
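An illustrative usage note, not part of the patch: with ETHTOOL_RING_USE_TCP_DATA_SPLIT advertised, header split is toggled through the standard ringparam interface, e.g.

    ethtool -G eth0 tcp-data-split on
    ethtool -g eth0

As the hunks above show, the setting is applied together with any descriptor-count change, or recorded and deferred until the interface is brought up when the link is down.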
if VSI is up, bring it down and then back up */ @@ -4417,9 +4412,7 @@ static int ice_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; u8 sff8472_comp = 0; u8 sff8472_swap = 0; @@ -4491,12 +4484,10 @@ static int ice_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { - struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = ice_netdev_to_pf(netdev); #define SFF_READ_BLOCK_SIZE 8 u8 value[SFF_READ_BLOCK_SIZE] = { 0 }; u8 addr = ICE_I2C_EEPROM_DEV_ADDR; - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; bool is_sfp = false; unsigned int i, j; @@ -4661,6 +4652,98 @@ static void ice_get_fec_stats(struct net_device *netdev, pi->lport, err); } +static void ice_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + mac_stats->FramesTransmittedOK = ps->eth.tx_unicast + + ps->eth.tx_multicast + + ps->eth.tx_broadcast; + mac_stats->FramesReceivedOK = ps->eth.rx_unicast + + ps->eth.rx_multicast + + ps->eth.rx_broadcast; + mac_stats->FrameCheckSequenceErrors = ps->crc_errors; + mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes; + mac_stats->OctetsReceivedOK = ps->eth.rx_bytes; + mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast; + mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast; + mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast; + mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast; + mac_stats->InRangeLengthErrors = ps->rx_len_errors; + mac_stats->FrameTooLongErrors = ps->rx_oversize; +} + +static void ice_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *pause_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx; + pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx; +} + +static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1522 }, + { 1523, 9522 }, + {} +}; + +static void ice_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + rmon->undersize_pkts = ps->rx_undersize; + rmon->oversize_pkts = ps->rx_oversize; + rmon->fragments = ps->rx_fragments; + rmon->jabbers = ps->rx_jabber; + + rmon->hist[0] = ps->rx_size_64; + rmon->hist[1] = ps->rx_size_127; + rmon->hist[2] = ps->rx_size_255; + rmon->hist[3] = ps->rx_size_511; + rmon->hist[4] = ps->rx_size_1023; + rmon->hist[5] = ps->rx_size_1522; + rmon->hist[6] = ps->rx_size_big; + + rmon->hist_tx[0] = ps->tx_size_64; + rmon->hist_tx[1] = ps->tx_size_127; + rmon->hist_tx[2] = ps->tx_size_255; + rmon->hist_tx[3] = ps->tx_size_511; + rmon->hist_tx[4] = ps->tx_size_1023; + rmon->hist_tx[5] = ps->tx_size_1522; + rmon->hist_tx[6] = ps->tx_size_big; + + *ranges = ice_rmon_ranges; +} + +/* ice_get_ts_stats - provide timestamping stats + * @netdev: the netdevice pointer from ethtool + * @ts_stats: the ethtool data structure to fill in + */ +static void 
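An illustrative usage note, not part of the patch: the four new callbacks surface through the standard ethtool statistics queries, e.g.

    ethtool -S eth0 --groups eth-mac rmon      # MAC and RMON counter groups
    ethtool --include-statistics -a eth0       # pause frame counters
    ethtool --include-statistics -T eth0       # Tx timestamping counters

Group and option names follow ethtool(8); older ethtool binaries may lack the --groups and --include-statistics options.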
ice_get_ts_stats(struct net_device *netdev, + struct ethtool_ts_stats *ts_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_ptp *ptp = &pf->ptp; + + ts_stats->pkts = ptp->tx_hwtstamp_good; + ts_stats->err = ptp->tx_hwtstamp_skipped + + ptp->tx_hwtstamp_flushed + + ptp->tx_hwtstamp_discarded; + ts_stats->lost = ptp->tx_hwtstamp_timeouts; +} + #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \ ETH_RESET_FILTER | ETH_RESET_OFFLOAD) @@ -4682,8 +4765,7 @@ static void ice_get_fec_stats(struct net_device *netdev, */ static int ice_ethtool_reset(struct net_device *dev, u32 *flags) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); enum ice_reset_req reset; switch (*flags) { @@ -4741,9 +4823,14 @@ static const struct ethtool_ops ice_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, .supported_input_xfrm = RXH_XFRM_SYM_XOR, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_fec_stats = ice_get_fec_stats, + .get_eth_mac_stats = ice_get_eth_mac_stats, + .get_pause_stats = ice_get_pause_stats, + .get_rmon_stats = ice_get_rmon_stats, + .get_ts_stats = ice_get_ts_stats, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 013c93b6605e..c0dbec369366 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -574,9 +574,7 @@ ice_destroy_tunnel_end: int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); enum ice_tunnel_type tnl_type; int status; u16 index; @@ -598,9 +596,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); enum ice_tunnel_type tnl_type; int status; @@ -3582,6 +3578,19 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, } /** + * ice_set_tcam_flags - set TCAM flag don't care mask + * @mask: mask for flags + * @dc_mask: pointer to the don't care mask + */ +static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ]) +{ + u16 inverted_mask = ~mask; + + /* flags are lowest u16 */ + put_unaligned_le16(inverted_mask, dc_mask); +} + +/** * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list * @hw: pointer to the HW struct * @idx: the index of the TCAM entry to remove @@ -3651,6 +3660,9 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, if (!p) return -ENOMEM; + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(tcam->attr.mask, dc_msk); + status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, tcam->attr.flags, vl_msk, dc_msk, nm_msk); @@ -3677,6 +3689,34 @@ err_ice_prof_tcam_ena_dis: } /** + * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use + * @ptg_attr: pointer 
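/* A standalone sketch, not part of the patch, of the don't-care semantics
 * that ice_set_tcam_flags() above implements; it compiles as plain
 * userspace C. The low 16 bits of the TCAM key carry packet-attribute
 * flags, and a 1 bit in dc_msk tells the TCAM to ignore that key bit on
 * lookup, so inverting the caller's mask turns exactly the unmasked flag
 * bits into "don't care".
 */
#include <stdint.h>
#include <stdio.h>

static void set_tcam_flags(uint16_t mask, uint8_t dc_mask[2])
{
	uint16_t inverted_mask = ~mask;

	/* little-endian store, as put_unaligned_le16() does in the driver */
	dc_mask[0] = inverted_mask & 0xff;
	dc_mask[1] = inverted_mask >> 8;
}

int main(void)
{
	uint8_t dc[2];

	set_tcam_flags(0x00F0, dc);	/* match only on flag bits 4..7 */
	printf("dc_msk = %02x %02x\n", dc[0], dc[1]);	/* prints: 0f ff */
	return 0;
}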
to the PTG and attribute pair to check + * @ptgs_used: bitmap that denotes which PTGs are in use + * @attr_used: array of PTG and attribute pairs already used + * @attr_cnt: count of entries in the attr_used array + * + * Return: true if the PTG and attribute pair is in use, false otherwise. + */ +static bool +ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used, + struct ice_tcam_inf *attr_used[], u16 attr_cnt) +{ + u16 i; + + if (!test_bit(ptg_attr->ptg, ptgs_used)) + return false; + + /* the PTG is used, so now look for correct attributes */ + for (i = 0; i < attr_cnt; i++) + if (attr_used[i]->ptg == ptg_attr->ptg && + attr_used[i]->attr.flags == ptg_attr->attr.flags && + attr_used[i]->attr.mask == ptg_attr->attr.mask) + return true; + + return false; +} + +/** * ice_adj_prof_priorities - adjust profile based on priorities * @hw: pointer to the HW struct * @blk: hardware block @@ -3688,10 +3728,16 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); + struct ice_tcam_inf **attr_used; struct ice_vsig_prof *t; - int status; + u16 attr_used_cnt = 0; + int status = 0; u16 idx; + attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL); + if (!attr_used) + return -ENOMEM; + bitmap_zero(ptgs_used, ICE_XLT1_CNT); idx = vsig & ICE_VSIG_IDX_M; @@ -3709,11 +3755,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 i; for (i = 0; i < t->tcam_count; i++) { + bool used; + /* Scan the priorities from newest to oldest. * Make sure that the newest profiles take priority. */ - if (test_bit(t->tcam[i].ptg, ptgs_used) && - t->tcam[i].in_use) { + used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used, + attr_used, attr_used_cnt); + + if (used && t->tcam[i].in_use) { /* need to mark this PTG as never match, as it * was already in use and therefore duplicate * (and lower priority) @@ -3723,9 +3773,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, &t->tcam[i], chg); if (status) - return status; - } else if (!test_bit(t->tcam[i].ptg, ptgs_used) && - !t->tcam[i].in_use) { + goto free_attr_used; + } else if (!used && !t->tcam[i].in_use) { /* need to enable this PTG, as it is not in use * and not enabled (highest priority) */ @@ -3734,15 +3783,21 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, &t->tcam[i], chg); if (status) - return status; + goto free_attr_used; } /* keep track of used ptgs */ - __set_bit(t->tcam[i].ptg, ptgs_used); + set_bit(t->tcam[i].ptg, ptgs_used); + if (attr_used_cnt < ICE_MAX_PTG_ATTRS) + attr_used[attr_used_cnt++] = &t->tcam[i]; + else + ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); } } - return 0; +free_attr_used: + kfree(attr_used); + return status; } /** @@ -3825,11 +3880,15 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, p->vsig = vsig; p->tcam_idx = t->tcam[i].tcam_idx; + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk); + /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, t->tcam[i].prof_id, - t->tcam[i].ptg, vsig, 0, 0, - vl_msk, dc_msk, nm_msk); + t->tcam[i].ptg, vsig, 0, + t->tcam[i].attr.flags, vl_msk, + dc_msk, nm_msk); if (status) { devm_kfree(ice_hw_to_dev(hw), p); goto err_ice_add_prof_id_vsig; @@ -4143,9 +4202,6 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_num; int status; - if (blk != ICE_BLK_FD) return
-EINVAL; - vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); if (status) { @@ -4154,6 +4210,9 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, return status; } + if (blk != ICE_BLK_FD) + return 0; + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); if (status) { diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 817beca591e0..80c9e7c749c2 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -187,6 +187,7 @@ struct ice_prof_map { }; #define ICE_INVALID_TCAM 0xFFFF +#define ICE_MAX_PTG_ATTRS 1024 struct ice_tcam_inf { u16 tcam_idx; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 6d5c939dc8a5..c9b6d0a84bd1 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -5,6 +5,38 @@ #include "ice_flow.h" #include <net/gre.h> +/* Size of known protocol header fields */ +#define ICE_FLOW_FLD_SZ_ETH_TYPE 2 +#define ICE_FLOW_FLD_SZ_VLAN 2 +#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 +#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 +#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4 +#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6 +#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8 +#define ICE_FLOW_FLD_SZ_IPV4_ID 2 +#define ICE_FLOW_FLD_SZ_IPV6_ID 4 +#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4 +#define ICE_FLOW_FLD_SZ_IP_DSCP 1 +#define ICE_FLOW_FLD_SZ_IP_TTL 1 +#define ICE_FLOW_FLD_SZ_IP_PROT 1 +#define ICE_FLOW_FLD_SZ_PORT 2 +#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 +#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 +#define ICE_FLOW_FLD_SZ_ICMP_CODE 1 +#define ICE_FLOW_FLD_SZ_ARP_OPER 2 +#define ICE_FLOW_FLD_SZ_GRE_KEYID 4 +#define ICE_FLOW_FLD_SZ_GTP_TEID 4 +#define ICE_FLOW_FLD_SZ_GTP_QFI 2 +#define ICE_FLOW_FLD_SZ_PFCP_SEID 8 +#define ICE_FLOW_FLD_SZ_ESP_SPI 4 +#define ICE_FLOW_FLD_SZ_AH_SPI 4 +#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 +#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2 +#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2 + /* Describe properties of a protocol header field */ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; @@ -20,6 +52,7 @@ struct ice_flow_field_info { .mask = 0, \ } +/* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */ #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ .hdr = _hdr, \ .off = (_offset_bytes) * BITS_PER_BYTE, \ @@ -61,7 +94,33 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_IPV6_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)), /* ICE_FLOW_FIELD_IDX_IPV6_DA */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, + ICE_FLOW_FLD_SZ_IPV4_ID), + /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, + ICE_FLOW_FLD_SZ_IPV6_ID), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + 
ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), /* Transport */ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), @@ -76,7 +135,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), + /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8, + ICE_FLOW_FLD_SZ_SCTP_CHKSUM), /* ARP */ /* ICE_FLOW_FIELD_IDX_ARP_SIP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), @@ -108,9 +174,17 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), 0x3f00), /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* PPPoE */ /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), @@ -128,7 +202,16 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), /* NAT_T_ESP */ /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, + ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI), + /* L2TPV2 */ + /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12, + ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID), + /* L2TPV2_LEN */ + /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14, + ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID), }; /* Bitmaps indicating relevant packet types for a particular protocol header @@ -137,9 +220,9 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { */ static const u32 ice_ptypes_mac_ofos[] = { 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, - 0x0000077E, 0x00000000, 0x00000000, 0x00000000, - 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000077E, 0x000003FF, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707, + 0xFFFFF000, 0x000003FF, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -162,10 +245,10 @@ static const u32 ice_ptypes_macvlan_il[] = { * include IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, - 0x00000000, 0x000FC000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x000002A0, 0x00000000, + 0x00015000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -176,10 +259,10 @@ static const u32 ice_ptypes_ipv4_ofos[] = { * IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos_all[] = { - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, - 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101, + 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -191,7 +274,7 @@ static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x001FF800, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -202,10 +285,10 @@ static const u32 ice_ptypes_ipv4_il[] = { * include IPv6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x00000000, 0x76000000, 0x10002000, 0x00000000, 0x000002AA, 0x00000000, 0x00000000, - 0x00000000, 0x03F00000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x00000540, 0x00000000, + 0x0002A000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -216,10 +299,10 @@ static const u32 ice_ptypes_ipv6_ofos[] = { * IPv6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos_all[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, - 0x00000000, 0x000002AA, 0x00000000, 0x00000000, - 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000, + 0x0000077E, 0x000002AA, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206, + 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -231,7 +314,7 @@ static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -304,8 +387,8 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = { static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00410000, 0x90842000, 0x00000007, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00410000, 0x908427E0, 0x00000007, + 0x0413F000, 0x00000041, 0x10410410, 0x00004104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -317,7 +400,7 @@ static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00820000, 0x21084000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08200000, 0x00000082, 0x20820820, 0x00008208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -329,7 +412,7 @@ static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01040000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x10400000, 0x00000104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -353,7 +436,7 @@ static const u32 ice_ptypes_icmp_il[] = { 0x00000000, 0x02040408, 0x40000102, 0x08101020, 0x00000408, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x42108000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x20800000, 0x00000208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -365,7 +448,7 @@ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -374,7 +457,7 @@ static const u32 ice_ptypes_gre_of[] = { /* Packet types for packets with an Innermost/Last MAC header */ static const u32 ice_ptypes_mac_il[] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -388,7 +471,7 @@ static const u32 ice_ptypes_mac_il[] = { static const u32 ice_ptypes_gtpc[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000180, 0x00000000, + 0x00000000, 0x00000000, 0x000001E0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -2325,6 +2408,130 @@ static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof) } /** + * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile + * @hw: device HW + * @prof: parser profile describing extracted FV (field 
vector) entries + * @prof_id: RSS profile identifier used to program symmetry registers + * + * The routine scans the parser profile's FV entries and looks for + * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found, + * it programs XOR-based symmetry so that flows hash identically regardless + * of packet direction. This preserves CPU affinity for the same 5-tuple. + * + * Notes: + * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed + * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly. + * - We guard against out-of-bounds access before looking at fv[i + len]. + */ +static void ice_rss_cfg_raw_symm(struct ice_hw *hw, + const struct ice_parser_profile *prof, + u64 prof_id) +{ + for (size_t i = 0; i < prof->fv_num; i++) { + u8 proto_id = prof->fv[i].proto_id; + u16 src_off = 0, dst_off = 0; + size_t src_idx, dst_idx; + bool is_matched = false; + unsigned int len = 0; + + switch (proto_id) { + /* IPv4 address pairs (outer/inner variants) */ + case ICE_PROT_IPV4_OF_OR_S: + case ICE_PROT_IPV4_IL: + case ICE_PROT_IPV4_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV4_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET; + dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET; + break; + + /* IPv6 address pairs (outer/inner variants) */ + case ICE_PROT_IPV6_OF_OR_S: + case ICE_PROT_IPV6_IL: + case ICE_PROT_IPV6_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV6_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET; + dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET; + break; + + /* L4 port pairs (TCP/UDP/SCTP) */ + case ICE_PROT_TCP_IL: + case ICE_PROT_UDP_IL_OR_S: + case ICE_PROT_SCTP_IL: + len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET; + dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET; + break; + + default: + continue; + } + + /* Bounds check before accessing fv[i + len]. */ + if (i + len >= prof->fv_num) + continue; + + /* Verify src/dst pairing for this protocol id. */ + is_matched = prof->fv[i].offset == src_off && + prof->fv[i + len].proto_id == proto_id && + prof->fv[i + len].offset == dst_off; + if (!is_matched) + continue; + + /* Program XOR symmetry for this field pair. */ + src_idx = i; + dst_idx = i + len; + + ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len); + + /* Skip over the pair just handled; the loop's ++i supplies + * the final step. + */ + i += 2 * len - 1; + } +} + +/* Number of GLQF_HSYMM registers per packet profile */ +#define ICE_SYMM_REG_INDEX_MAX 6 + +/** + * ice_rss_update_raw_symm - update symmetric hash configuration for raw pattern + * @hw: pointer to the hardware structure + * @cfg: configuration parameters for raw pattern + * @id: profile tracking ID + * + * Update symmetric hash configuration for raw pattern if required. + * Otherwise only clear to default. 
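+ * + * A minimal usage sketch (the local names here are illustrative, not taken + * from the driver): + * + *	struct ice_rss_raw_cfg cfg = { + *		.prof = parsed_profile,	/* a struct ice_parser_profile */ + *		.raw_ena = true, + *		.symm = true, + *	}; + * + *	ice_rss_update_raw_symm(hw, &cfg, prof_tracking_id); + * + * This first zeroes the ICE_SYMM_REG_INDEX_MAX GLQF_HSYMM registers of the + * matched profile and, because cfg->symm is set, lets ice_rss_cfg_raw_symm() + * re-program XOR pairs for the src/dst fields it finds, so both directions + * of a flow produce the same hash value and land on the same queue.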
+ */ +void +ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + if (!map) + return; + /* clear to default */ + for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + + if (cfg->symm) + ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id); +} + +/** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 52f906d89eca..6c6cdc8addb1 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -22,6 +22,15 @@ #define ICE_FLOW_HASH_IPV6 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) +#define ICE_FLOW_HASH_IPV6_PRE32 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA)) +#define ICE_FLOW_HASH_IPV6_PRE48 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA)) +#define ICE_FLOW_HASH_IPV6_PRE64 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)) #define ICE_FLOW_HASH_TCP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) @@ -40,6 +49,33 @@ #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT) + +#define ICE_FLOW_HASH_GTP_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) + +#define ICE_FLOW_HASH_GTP_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) +#define ICE_FLOW_HASH_GTP_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) + #define ICE_FLOW_HASH_GTP_C_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) @@ -128,6 +164,23 @@ #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID) + +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID) + +#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12 +#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16 +#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8 +#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24 +#define 
ICE_FLOW_FIELD_SRC_PORT_OFFSET 0 +#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2 + /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. Each * logical group of protocol headers encapsulates or is encapsulated using/by @@ -160,10 +213,13 @@ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_AH = 0x00200000, ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, + ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000, + ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000, /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and - * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs + * ICE_FLOW_SEG_HDR_IPV6. */ - ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, + ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000, + ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000, }; /* These segments all have the same PTYPES, but are otherwise distinguished by @@ -200,6 +256,15 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, ICE_FLOW_FIELD_IDX_IPV6_DA, + ICE_FLOW_FIELD_IDX_IPV4_CHKSUM, + ICE_FLOW_FIELD_IDX_IPV4_ID, + ICE_FLOW_FIELD_IDX_IPV6_ID, + ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA, + ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA, + ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA, /* L4 */ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, @@ -208,6 +273,9 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_FLAGS, + ICE_FLOW_FIELD_IDX_TCP_CHKSUM, + ICE_FLOW_FIELD_IDX_UDP_CHKSUM, + ICE_FLOW_FIELD_IDX_SCTP_CHKSUM, /* ARP */ ICE_FLOW_FIELD_IDX_ARP_SIP, ICE_FLOW_FIELD_IDX_ARP_DIP, @@ -228,13 +296,15 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, /* GTPU_UP */ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + ICE_FLOW_FIELD_IDX_GTPU_UP_QFI, /* GTPU_DWN */ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, + ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI, /* PPPoE */ ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, /* PFCP */ ICE_FLOW_FIELD_IDX_PFCP_SEID, /* L2TPv3 */ ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, /* ESP */ ICE_FLOW_FIELD_IDX_ESP_SPI, @@ -242,10 +312,16 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_AH_SPI, /* NAT_T ESP */ ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + /* L2TPV2 SESSION ID */ + ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID, + /* L2TPV2_LEN SESSION ID */ + ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; +static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64"); + #define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) #define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) #define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) @@ -296,6 +372,10 @@ enum ice_rss_cfg_hdr_type { /* take inner headers as inputset for packet with outer ipv4. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, /* take inner headers as inputset for packet with outer ipv6. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, + /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE, + /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. 
*/ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE, /* take outer headers first then inner headers as inputset */ ICE_RSS_ANY_HEADERS }; @@ -406,6 +486,12 @@ struct ice_flow_prof { bool symm; /* Symmetric Hash for RSS */ }; +struct ice_rss_raw_cfg { + struct ice_parser_profile prof; + bool raw_ena; + bool symm; +}; + struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ @@ -444,4 +530,6 @@ int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); +void ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index aebf8e08a297..d2576d606e10 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -2177,8 +2177,7 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr) */ static void ice_lag_disable_sriov_bond(struct ice_lag *lag) { - struct ice_netdev_priv *np = netdev_priv(lag->netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(lag->netdev); ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG); diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 10c312d49e05..185672c7e17d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -342,6 +342,9 @@ enum ice_flg64_bits { /* for ice_32byte_rx_flex_desc.pkt_length member */ #define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ +/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */ +#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0) + enum ice_rx_flex_desc_status_error_0_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 4479c824561e..15621707fbf8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1427,7 +1427,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->reg_idx = vsi->rxq_map[i]; ring->vsi = vsi; ring->netdev = vsi->netdev; - ring->dev = dev; ring->count = vsi->num_rx_desc; ring->cached_phctime = pf->ptp.cached_phc_time; @@ -2769,7 +2768,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) * @vsi: VSI pointer * * Associate queue[s] with napi for all vectors. - * The caller must hold rtnl_lock. */ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { @@ -2779,6 +2777,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) if (!netdev) return; + ASSERT_RTNL(); ice_for_each_rxq(vsi, q_idx) netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, &vsi->rx_rings[q_idx]->q_vector->napi); @@ -2799,7 +2798,6 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) * @vsi: VSI pointer * * Clear the association between all VSI queues and napi. - * The caller must hold rtnl_lock. 
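+ * Context: must be called with rtnl_lock held; this is now checked at + * runtime by the ASSERT_RTNL() below rather than only stated here.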
*/ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { @@ -2809,6 +2807,7 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) if (!netdev) return; + ASSERT_RTNL(); /* Clear the NAPI's interrupt number */ ice_for_each_q_vector(vsi, v_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 86f5859e88ef..2533876f1a2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -37,6 +37,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS("LIBETH"); +MODULE_IMPORT_NS("LIBETH_XDP"); MODULE_IMPORT_NS("LIBIE"); MODULE_IMPORT_NS("LIBIE_ADMINQ"); MODULE_IMPORT_NS("LIBIE_FWLOG"); @@ -2957,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) */ static int ice_max_xdp_frame_size(struct ice_vsi *vsi) { - if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - return ICE_RXBUF_1664; - else - return ICE_RXBUF_3072; + return ICE_RXBUF_3072; } /** @@ -3018,19 +3017,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } } xdp_features_set_redirect_target(vsi->netdev, true); - /* reallocate Rx queues that are used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, true); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); - /* reallocate Rx queues that were used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, false); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); } resume_if: @@ -3949,9 +3940,10 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf) * ice_deinit_pf - Unrolls initializations done by ice_init_pf * @pf: board private structure to initialize */ -static void ice_deinit_pf(struct ice_pf *pf) +void ice_deinit_pf(struct ice_pf *pf) { - ice_service_task_stop(pf); + /* also used to unroll a partially failed ice_init_pf() */ + mutex_destroy(&pf->lag_mutex); mutex_destroy(&pf->adev_mutex); mutex_destroy(&pf->sw_mutex); @@ -3977,6 +3969,9 @@ static void ice_deinit_pf(struct ice_pf *pf) if (pf->ptp.clock) ptp_clock_unregister(pf->ptp.clock); + if (!xa_empty(&pf->irq_tracker.entries)) + ice_free_irq_msix_misc(pf); + xa_destroy(&pf->dyn_ports); xa_destroy(&pf->sf_nums); } @@ -4030,13 +4025,25 @@ static void ice_set_pf_caps(struct ice_pf *pf) pf->max_pf_rxqs = func_caps->common_cap.num_rxq; } +void ice_start_service_task(struct ice_pf *pf) +{ + timer_setup(&pf->serv_tmr, ice_service_timer, 0); + pf->serv_tmr_period = HZ; + INIT_WORK(&pf->serv_task, ice_service_task); + clear_bit(ICE_SERVICE_SCHED, pf->state); +} + /** * ice_init_pf - Initialize general software structures (struct ice_pf) * @pf: board private structure to initialize + * Return: 0 on success, negative errno otherwise. 
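+ * + * Note: on failure ice_init_pf() unrolls itself via ice_deinit_pf(), which + * copes with a half-initialized @pf, so callers only need to propagate the + * error.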
*/ -static int ice_init_pf(struct ice_pf *pf) +int ice_init_pf(struct ice_pf *pf) { - ice_set_pf_caps(pf); + struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int err = -ENOMEM; mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); @@ -4049,32 +4056,7 @@ static int ice_init_pf(struct ice_pf *pf) init_waitqueue_head(&pf->reset_wait_queue); - /* setup service timer and periodic service task */ - timer_setup(&pf->serv_tmr, ice_service_timer, 0); - pf->serv_tmr_period = HZ; - INIT_WORK(&pf->serv_task, ice_service_task); - clear_bit(ICE_SERVICE_SCHED, pf->state); - mutex_init(&pf->avail_q_mutex); - pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); - if (!pf->avail_txqs) - return -ENOMEM; - - pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); - if (!pf->avail_rxqs) { - bitmap_free(pf->avail_txqs); - pf->avail_txqs = NULL; - return -ENOMEM; - } - - pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); - if (!pf->txtime_txqs) { - bitmap_free(pf->avail_txqs); - pf->avail_txqs = NULL; - bitmap_free(pf->avail_rxqs); - pf->avail_rxqs = NULL; - return -ENOMEM; - } mutex_init(&pf->vfs.table_lock); hash_init(pf->vfs.table); @@ -4087,7 +4069,36 @@ static int ice_init_pf(struct ice_pf *pf) xa_init(&pf->dyn_ports); xa_init(&pf->sf_nums); + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); + pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs) + goto undo_init; + + udp_tunnel_nic->set_port = ice_udp_tunnel_set_port; + udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port; + udp_tunnel_nic->shared = &hw->udp_tunnel_shared; + udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN]; + udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; + udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE]; + udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE; + + /* In case of MSIX we are going to setup the misc vector right here + * to handle admin queue events etc. In case of legacy and MSI + * the misc functionality and queue processing is combined in + * the same vector and that gets setup at open. 
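+ * + * The interrupt scheme itself is set up earlier, in ice_init_dev(), which + * ice_probe() now calls before ice_init() and therefore before this + * function runs.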
+ */ + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto undo_init; + } + return 0; +undo_init: + /* deinit handles half-initialized pf just fine */ + ice_deinit_pf(pf); + return err; } /** @@ -4722,9 +4733,8 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -int ice_init_dev(struct ice_pf *pf) +void ice_init_dev_hw(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int err; @@ -4744,61 +4754,28 @@ int ice_init_dev(struct ice_pf *pf) */ ice_set_safe_mode_caps(hw); } +} - err = ice_init_pf(pf); - if (err) { - dev_err(dev, "ice_init_pf failed: %d\n", err); - return err; - } - - pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; - pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; - pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; - if (pf->hw.tnl.valid_count[TNL_VXLAN]) { - pf->hw.udp_tunnel_nic.tables[0].n_entries = - pf->hw.tnl.valid_count[TNL_VXLAN]; - pf->hw.udp_tunnel_nic.tables[0].tunnel_types = - UDP_TUNNEL_TYPE_VXLAN; - } - if (pf->hw.tnl.valid_count[TNL_GENEVE]) { - pf->hw.udp_tunnel_nic.tables[1].n_entries = - pf->hw.tnl.valid_count[TNL_GENEVE]; - pf->hw.udp_tunnel_nic.tables[1].tunnel_types = - UDP_TUNNEL_TYPE_GENEVE; - } +int ice_init_dev(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + ice_set_pf_caps(pf); err = ice_init_interrupt_scheme(pf); if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); - err = -EIO; - goto unroll_pf_init; + return -EIO; } - /* In case of MSIX we are going to setup the misc vector right here - * to handle admin queue events etc. In case of legacy and MSI - * the misc functionality and queue processing is combined in - * the same vector and that gets setup at open. - */ - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto unroll_irq_scheme_init; - } + ice_start_service_task(pf); return 0; - -unroll_irq_scheme_init: - ice_clear_interrupt_scheme(pf); -unroll_pf_init: - ice_deinit_pf(pf); - return err; } void ice_deinit_dev(struct ice_pf *pf) { - ice_free_irq_msix_misc(pf); - ice_deinit_pf(pf); - ice_deinit_hw(&pf->hw); + ice_service_task_stop(pf); /* Service task is already stopped, so call reset directly. 
*/ ice_reset(&pf->hw, ICE_RESET_PFR); @@ -5038,21 +5015,24 @@ static void ice_deinit_devlink(struct ice_pf *pf) static int ice_init(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); int err; - err = ice_init_dev(pf); - if (err) + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); return err; + } if (pf->hw.mac_type == ICE_MAC_E830) { err = pci_enable_ptm(pf->pdev, NULL); if (err) - dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); + dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n"); } err = ice_alloc_vsis(pf); if (err) - goto err_alloc_vsis; + goto unroll_pf_init; err = ice_init_pf_sw(pf); if (err) @@ -5089,8 +5069,8 @@ err_init_link: ice_deinit_pf_sw(pf); err_init_pf_sw: ice_dealloc_vsis(pf); -err_alloc_vsis: - ice_deinit_dev(pf); +unroll_pf_init: + ice_deinit_pf(pf); return err; } @@ -5101,7 +5081,7 @@ static void ice_deinit(struct ice_pf *pf) ice_deinit_pf_sw(pf); ice_dealloc_vsis(pf); - ice_deinit_dev(pf); + ice_deinit_pf(pf); } /** @@ -5235,6 +5215,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + bool need_dev_deinit = false; struct ice_adapter *adapter; struct ice_pf *pf; struct ice_hw *hw; @@ -5331,10 +5312,14 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } pf->adapter = adapter; - err = ice_init(pf); + err = ice_init_dev(pf); if (err) goto unroll_adapter; + err = ice_init(pf); + if (err) + goto unroll_dev_init; + devl_lock(priv_to_devlink(pf)); err = ice_load(pf); if (err) @@ -5352,10 +5337,14 @@ unroll_load: unroll_init: devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); +unroll_dev_init: + need_dev_deinit = true; unroll_adapter: ice_adapter_put(pdev); unroll_hw_init: ice_deinit_hw(hw); + if (need_dev_deinit) + ice_deinit_dev(pf); return err; } @@ -5450,10 +5439,6 @@ static void ice_remove(struct pci_dev *pdev) ice_hwmon_exit(pf); - ice_service_task_stop(pf); - ice_aq_cancel_waiting_tasks(pf); - set_bit(ICE_DOWN, pf->state); - if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); @@ -5471,6 +5456,11 @@ static void ice_remove(struct pci_dev *pdev) ice_set_wake(pf); ice_adapter_put(pdev); + ice_deinit_hw(&pf->hw); + + ice_deinit_dev(pf); + ice_aq_cancel_waiting_tasks(pf); + set_bit(ICE_DOWN, pf->state); } /** @@ -7138,6 +7128,9 @@ void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, + &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); @@ -7862,12 +7855,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu) frame_size - ICE_ETH_PKT_HDR_PAD); return -EINVAL; } - } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { - if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { - netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", - ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); - return -EINVAL; - } } /* if a reset is in progress, wait for some time for it to complete */ @@ -8071,9 +8058,7 @@ static int ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); u16 bmode; bmode = pf->first_sw->bridge_mode; @@ -8143,8 +8128,7 @@ 
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 __always_unused flags, struct netlink_ext_ack __always_unused *extack) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; struct ice_sw *pf_sw; @@ -9578,8 +9562,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, */ int ice_open(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); if (ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't open net device while reset is in progress"); diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 7c09ea0f03ba..725167d557a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -82,26 +82,46 @@ enum ice_sw_tunnel_type { enum ice_prot_id { ICE_PROT_ID_INVAL = 0, ICE_PROT_MAC_OF_OR_S = 1, + ICE_PROT_MAC_O2 = 2, ICE_PROT_MAC_IL = 4, + ICE_PROT_MAC_IN_MAC = 7, ICE_PROT_ETYPE_OL = 9, ICE_PROT_ETYPE_IL = 10, + ICE_PROT_PAY = 15, + ICE_PROT_EVLAN_O = 16, + ICE_PROT_VLAN_O = 17, + ICE_PROT_VLAN_IF = 18, + ICE_PROT_MPLS_OL_MINUS_1 = 27, + ICE_PROT_MPLS_OL_OR_OS = 28, + ICE_PROT_MPLS_IL = 29, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, + ICE_PROT_IPV4_IL_IL = 34, ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, + ICE_PROT_IPV6_IL_IL = 42, + ICE_PROT_IPV6_NEXT_PROTO = 43, + ICE_PROT_IPV6_FRAG = 47, ICE_PROT_TCP_IL = 49, ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, ICE_PROT_GRE_OF = 64, + ICE_PROT_NSH_F = 84, ICE_PROT_ESP_F = 88, ICE_PROT_ESP_2 = 89, ICE_PROT_SCTP_IL = 96, ICE_PROT_ICMP_IL = 98, ICE_PROT_ICMPV6_IL = 100, + ICE_PROT_VRRP_F = 101, + ICE_PROT_OSPF = 102, ICE_PROT_PPPOE = 103, ICE_PROT_L2TPV3 = 104, + ICE_PROT_ATAOE_OF = 114, + ICE_PROT_CTRL_OF = 116, + ICE_PROT_LLDP_OF = 117, ICE_PROT_ARP_OF = 118, + ICE_PROT_EAPOL_OF = 120, ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index fb0f6365a6d6..985b3e79b312 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -500,6 +500,9 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) if (tstamp) { shhwtstamps.hwtstamp = ns_to_ktime(tstamp); ice_trace(tx_tstamp_complete, skb, idx); + + /* Count the number of Tx timestamps that succeeded */ + pf->ptp.tx_hwtstamp_good++; } skb_tstamp_tx(skb, &shhwtstamps); @@ -558,6 +561,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) { struct ice_ptp_port *ptp_port; unsigned long flags; + u32 tstamp_good = 0; struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; @@ -658,11 +662,16 @@ skip_ts_read: if (tstamp) { shhwtstamps.hwtstamp = ns_to_ktime(tstamp); ice_trace(tx_tstamp_complete, skb, idx); + + /* Count the number of Tx timestamps that succeeded */ + tstamp_good++; } skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } + + pf->ptp.tx_hwtstamp_good += tstamp_good; } /** @@ -2206,8 +2215,7 @@ static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, int ice_ptp_hwtstamp_get(struct net_device *netdev, struct kernel_hwtstamp_config *config) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = 
np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); if (pf->ptp.state != ICE_PTP_READY) return -EIO; @@ -2278,8 +2286,7 @@ int ice_ptp_hwtstamp_set(struct net_device *netdev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); int err; if (pf->ptp.state != ICE_PTP_READY) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 137f2070a2d9..27016aac4f1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -237,6 +237,7 @@ struct ice_ptp_pin_desc { * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration * @reset_time: kernel time after clock stop on reset + * @tx_hwtstamp_good: number of completed Tx timestamp requests * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed @@ -261,6 +262,7 @@ struct ice_ptp { struct ptp_clock *clock; struct kernel_hwtstamp_config tstamp_config; u64 reset_time; + u64 tx_hwtstamp_good; u32 tx_hwtstamp_skipped; u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_flushed; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 843e82fd3bf9..6b1126ddb561 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1190,8 +1190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) */ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vsi *vf_vsi; struct device *dev; struct ice_vf *vf; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 73f08d02f9c7..ad76768a4232 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -7,6 +7,8 @@ #include <linux/netdevice.h> #include <linux/prefetch.h> #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include <net/dsfield.h> #include <net/mpls.h> #include <net/xdp.h> @@ -111,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, static void ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) { - if (dma_unmap_len(tx_buf, len)) + if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len)) dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), @@ -125,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) dev_kfree_skb_any(tx_buf->skb); break; case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, false); break; case ICE_TX_BUF_XDP_XMIT: xdp_return_frame(tx_buf->xdpf); @@ -506,61 +508,67 @@ err: return -ENOMEM; } +void ice_rxq_pp_destroy(struct ice_rx_ring *rq) +{ + struct libeth_fq fq = { + .fqes = rq->rx_fqes, + .pp = rq->pp, + }; + + libeth_rx_fq_destroy(&fq); + rq->rx_fqes = NULL; + rq->pp = NULL; + + if (!rq->hdr_pp) + return; + + fq.fqes = rq->hdr_fqes; + fq.pp = rq->hdr_pp; + + libeth_rx_fq_destroy(&fq); + rq->hdr_fqes = NULL; + rq->hdr_pp = NULL; +} + /** * 
ice_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned */ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) { - struct xdp_buff *xdp = &rx_ring->xdp; - struct device *dev = rx_ring->dev; u32 size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buf) - return; if (rx_ring->xsk_pool) { ice_xsk_clean_rx_ring(rx_ring); goto rx_skip_free; } - if (xdp->data) { - xdp_return_buff(xdp); - xdp->data = NULL; - } + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_fqes) + return; + + libeth_xdp_return_stash(&rx_ring->xdp); /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem); - if (!rx_buf->page) - continue; + if (rx_ring->hdr_pp) + libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. - */ - dma_sync_single_range_for_cpu(dev, rx_buf->dma, - rx_buf->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - - rx_buf->page = NULL; - rx_buf->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } -rx_skip_free: - if (rx_ring->xsk_pool) - memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); - else - memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); + if (rx_ring->vsi->type == ICE_VSI_PF && + xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) { + xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + } + ice_rxq_pp_destroy(rx_ring); + +rx_skip_free: /* Zero out the descriptor ring */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -568,7 +576,6 @@ rx_skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; rx_ring->next_to_use = 0; } @@ -580,26 +587,20 @@ rx_skip_free: */ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) { + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; ice_clean_rx_ring(rx_ring); - if (rx_ring->vsi->type == ICE_VSI_PF) - if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); WRITE_ONCE(rx_ring->xdp_prog, NULL); if (rx_ring->xsk_pool) { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - } else { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; } if (rx_ring->desc) { size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); - dmam_free_coherent(rx_ring->dev, size, - rx_ring->desc, rx_ring->dma); + dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } } @@ -612,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) */ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) { - struct device *dev = rx_ring->dev; + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; - if (!dev) - return -ENOMEM; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_buf); - rx_ring->rx_buf = - kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); - if (!rx_ring->rx_buf) - return -ENOMEM; - /* round up to nearest page */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -633,22 +624,16 @@ int 
ice_setup_rx_ring(struct ice_rx_ring *rx_ring) if (!rx_ring->desc) { dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", size); - goto err; + return -ENOMEM; } rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); return 0; - -err: - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - return -ENOMEM; } /** @@ -662,7 +647,7 @@ err: * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static u32 -ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, +ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, union ice_32b_rx_flex_desc *eop_desc) { @@ -672,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; - ice_xdp_meta_set_desc(xdp, eop_desc); + xdp->desc = eop_desc; - act = bpf_prog_run_xdp(xdp_prog, xdp); + act = bpf_prog_run_xdp(xdp_prog, &xdp->base); switch (act) { case XDP_PASS: break; case XDP_TX: if (static_branch_unlikely(&ice_xdp_locking_key)) spin_lock(&xdp_ring->tx_lock); - ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); + ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); if (ret == ICE_XDP_CONSUMED) goto out_failure; break; case XDP_REDIRECT: - if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) + if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog)) goto out_failure; ret = ICE_XDP_REDIR; break; @@ -700,8 +685,10 @@ out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_DROP: + libeth_xdp_return_buff(xdp); ret = ICE_XDP_CONSUMED; } + exit: return ret; } @@ -790,53 +777,6 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } /** - * ice_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buf struct to modify - * - * Returns true if the page was successfully allocated or - * reused. - */ -static bool -ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) - return true; - - /* alloc new page for storage */ - page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, ice_rx_pg_order(rx_ring)); - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = rx_ring->rx_offset; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; - - return true; -} - -/** * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi. 
* @rx_ring: ring to init descriptors on * @count: number of descriptors to initialize @@ -882,9 +822,20 @@ void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count) */ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) { + const struct libeth_fq_fp hdr_fq = { + .pp = rx_ring->hdr_pp, + .fqes = rx_ring->hdr_fqes, + .truesize = rx_ring->hdr_truesize, + .count = rx_ring->count, + }; + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; - struct ice_rx_buf *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -892,30 +843,39 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) /* get the Rx descriptor and buffer based on next_to_use */ rx_desc = ICE_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_buf[ntu]; do { - /* if we fail here, we have work remaining */ - if (!ice_alloc_mapped_page(rx_ring, bi)) - break; + dma_addr_t addr; - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + break; + } /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); + + if (!hdr_fq.pp) + goto next; + + addr = libeth_rx_alloc(&hdr_fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + + libeth_rx_recycle_slow(fq.fqes[ntu].netmem); + break; + } + + rx_desc->read.hdr_addr = cpu_to_le64(addr); +next: rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = ICE_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_buf; ntu = 0; } @@ -932,402 +892,6 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) } /** - * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse - * @rx_buf: Rx buffer to adjust - * @size: Size of adjustment - * - * Update the offset within page so that Rx buf will be ready to be reused. 
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset - * so the second half of page assigned to Rx buffer will be used, otherwise - * the offset is moved by "size" bytes - */ -static void -ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) -{ -#if (PAGE_SIZE < 8192) - /* flip page offset to other buffer */ - rx_buf->page_offset ^= size; -#else - /* move offset up to the next cache line */ - rx_buf->page_offset += size; -#endif -} - -/** - * ice_can_reuse_rx_page - Determine if page can be reused for another Rx - * @rx_buf: buffer containing the page - * - * If page is reusable, we have a green light for calling ice_reuse_rx_page, - * which will assign the current buffer to the buffer that next_to_alloc is - * pointing to; otherwise, the DMA mapping needs to be destroyed and - * page freed - */ -static bool -ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) -{ - unsigned int pagecnt_bias = rx_buf->pagecnt_bias; - struct page *page = rx_buf->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - - /* if we are only owner of page we can reuse it */ - if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) - return false; -#if (PAGE_SIZE >= 8192) -#define ICE_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) - if (rx_buf->page_offset > ICE_LAST_OFFSET) - return false; -#endif /* PAGE_SIZE >= 8192) */ - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buf->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp buff to place the data into - * @rx_buf: buffer containing page to add - * @size: packet length from rx_desc - * - * This function will add the data contained in rx_buf->page to the xdp buf. - * It will just attach the page as a frag. - */ -static int -ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct ice_rx_buf *rx_buf, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(xdp)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(xdp); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) - return -ENOMEM; - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, - rx_buf->page_offset, size); - sinfo->xdp_frags_size += size; - - if (page_is_pfmemalloc(rx_buf->page)) - xdp_buff_set_frag_pfmemalloc(xdp); - - return 0; -} - -/** - * ice_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: Rx descriptor ring to store buffers on - * @old_buf: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - */ -static void -ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) -{ - u16 nta = rx_ring->next_to_alloc; - struct ice_rx_buf *new_buf; - - new_buf = &rx_ring->rx_buf[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* Transfer page from old buffer to new buffer. - * Move each member individually to avoid possible store - * forwarding stalls and unnecessary copy of skb. 
- */ - new_buf->dma = old_buf->dma; - new_buf->page = old_buf->page; - new_buf->page_offset = old_buf->page_offset; - new_buf->pagecnt_bias = old_buf->pagecnt_bias; -} - -/** - * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use - * @rx_ring: Rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * @ntc: index of next to clean element - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct ice_rx_buf * -ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, - const unsigned int ntc) -{ - struct ice_rx_buf *rx_buf; - - rx_buf = &rx_ring->rx_buf[ntc]; - prefetchw(rx_buf->page); - - if (!size) - return rx_buf; - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buf->pagecnt_bias--; - - return rx_buf; -} - -/** - * ice_get_pgcnts - grab page_count() for gathered fragments - * @rx_ring: Rx descriptor ring to store the page counts on - * @ntc: the next to clean element (not included in this frame!) - * - * This function is intended to be called right before running XDP - * program so that the page recycling mechanism will be able to take - * a correct decision regarding underlying pages; this is done in such - * way as XDP program can change the refcount of page - */ -static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc) -{ - u32 idx = rx_ring->first_desc; - struct ice_rx_buf *rx_buf; - u32 cnt = rx_ring->count; - - while (idx != ntc) { - rx_buf = &rx_ring->rx_buf[idx]; - rx_buf->pgcnt = page_count(rx_buf->page); - - if (++idx == cnt) - idx = 0; - } -} - -/** - * ice_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function builds an skb around an existing XDP buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. Driver has - * already combined frags (if any) to skb_shared_info. - */ -static struct sk_buff * -ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - u8 metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - unsigned int nr_frags; - struct sk_buff *skb; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* Prefetch first cache line of first page. If xdp->data_meta - * is unused, this points exactly as xdp->data, otherwise we - * likely have a consumer accessing first few bytes of meta - * data, and then actual data. 
- */ - net_prefetch(xdp->data_meta); - /* build an skb around the page buffer */ - skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); - if (unlikely(!skb)) - return NULL; - - /* must to record Rx queue, otherwise OS features such as - * symmetric queue won't work - */ - skb_record_rx_queue(skb, rx_ring->q_index); - - /* update pointers within the skb to store the data */ - skb_reserve(skb, xdp->data - xdp->data_hard_start); - __skb_put(skb, xdp->data_end - xdp->data); - if (metasize) - skb_metadata_set(skb, metasize); - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - - return skb; -} - -/** - * ice_construct_skb - Allocate skb and populate it - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. - */ -static struct sk_buff * -ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int size = xdp->data_end - xdp->data; - struct skb_shared_info *sinfo = NULL; - struct ice_rx_buf *rx_buf; - unsigned int nr_frags = 0; - unsigned int headlen; - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - net_prefetch(xdp->data); - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; - skb_record_rx_queue(skb, rx_ring->q_index); - /* Determine available headroom for copy */ - headlen = size; - if (headlen > ICE_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, - sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (size) { - /* besides adding here a partial frag, we are going to add - * frags from xdp_buff, make sure there is enough space for - * them - */ - if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { - dev_kfree_skb(skb); - return NULL; - } - skb_add_rx_frag(skb, 0, rx_buf->page, - rx_buf->page_offset + headlen, size, - xdp->frame_sz); - } else { - /* buffer is unused, restore biased page count in Rx buffer; - * data was copied onto skb's linear part so there's no - * need for adjusting page offset and we can reuse this buffer - * as-is - */ - rx_buf->pagecnt_bias++; - } - - if (unlikely(xdp_buff_has_frags(xdp))) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - - memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], - sizeof(skb_frag_t) * nr_frags); - - xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags, - sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - } - - return skb; -} - -/** - * ice_put_rx_buf - Clean up used buffer and either recycle or free - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buf: Rx buffer to pull data from - * - * This function will clean up the contents of the rx_buf. It will either - * recycle the buffer or unmap it and free the associated resources. 
- */ -static void -ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) -{ - if (!rx_buf) - return; - - if (ice_can_reuse_rx_page(rx_buf)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, - ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buf->page = NULL; -} - -/** - * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame - * @rx_ring: Rx ring with all the auxiliary data - * @xdp: XDP buffer carrying linear + frags part - * @ntc: the next to clean element (not included in this frame!) - * @verdict: return code from XDP program execution - * - * Called after XDP program is completed, or on error with verdict set to - * ICE_XDP_CONSUMED. - * - * Walk through buffers from first_desc to the end of the frame, releasing - * buffers and satisfying internal page recycle mechanism. The action depends - * on verdict from XDP program. - */ -static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - u32 ntc, u32 verdict) -{ - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - u32 xdp_frags = 0; - int i = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - - while (idx != ntc) { - buf = &rx_ring->rx_buf[idx]; - if (++idx == cnt) - idx = 0; - - /* An XDP program could release fragments from the end of the - * buffer. For these, we need to keep the pagecnt_bias as-is. - * To do this, only adjust pagecnt_bias for fragments up to - * the total remaining after the XDP program has run. 
- */ - if (verdict != ICE_XDP_CONSUMED) - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - else if (i++ <= xdp_frags) - buf->pagecnt_bias++; - - ice_put_rx_buf(rx_ring, buf); - } - - xdp->data = NULL; - rx_ring->first_desc = ntc; -} - -/** * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on * @@ -1361,9 +925,8 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) total_rx_pkts++; } - rx_ring->first_desc = ntc; rx_ring->next_to_clean = ntc; - ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring)); } /** @@ -1381,16 +944,17 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; - unsigned int offset = rx_ring->rx_offset; - struct xdp_buff *xdp = &rx_ring->xdp; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + LIBETH_XDP_ONSTACK_BUFF(xdp); u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; bool failure; + libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) { xdp_ring = rx_ring->xdp_ring; @@ -1400,19 +964,21 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; struct sk_buff *skb; unsigned int size; u16 stat_err_bits; u16 vlan_tci; + bool rxe; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); - /* status_error_len will always be zero for unused descriptors - * because it's cleared in cleanup, and overlaps with hdr_addr - * which is always zero because packet split isn't used, if the - * hardware wrote DD then it will be non-zero + /* + * The DD bit will always be zero for unused descriptors + * because it's cleared in cleanup or when setting the DMA + * address of the header buffer, which never uses the DD bit. + * If the hardware wrote the descriptor, it will be non-zero. 
*/ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) @@ -1426,71 +992,65 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) ice_trace(clean_rx_irq, rx_ring, rx_desc); + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); + rxe = ice_test_staterr(rx_desc->wb.status_error0, + stat_err_bits); + + if (!rx_ring->hdr_pp) + goto payload; + + size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1, + ICE_RX_FLEX_DESC_HDR_LEN_M); + if (unlikely(rxe)) + size = 0; + + rx_buf = &rx_ring->hdr_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); + rx_buf->netmem = 0; + +payload: size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + if (unlikely(rxe)) + size = 0; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, size, ntc); + rx_buf = &rx_ring->rx_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); - /* Increment ntc before calls to ice_put_rx_mbuf() */ if (++ntc == cnt) ntc = 0; - if (!xdp->data) { - void *hard_start; - - hard_start = page_address(rx_buf->page) + rx_buf->page_offset - - offset; - xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); - xdp_buff_clear_frags_flag(xdp); - } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { - ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED); - break; - } - /* skip if it is NOP desc */ - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data)) continue; - ice_get_pgcnts(rx_ring, ntc); xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; - total_rx_bytes += xdp_get_buff_len(xdp); - total_rx_pkts++; - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR); + if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) + xdp_xmit |= xdp_verdict; + total_rx_bytes += xdp_get_buff_len(&xdp->base); + total_rx_pkts++; + xdp->data = NULL; continue; + construct_skb: - if (likely(ice_ring_uses_build_skb(rx_ring))) - skb = ice_build_skb(rx_ring, xdp); - else - skb = ice_construct_skb(rx_ring, xdp); + skb = xdp_build_skb_from_buff(&xdp->base); + xdp->data = NULL; + /* exit if we failed to retrieve a buffer */ if (!skb) { + libeth_xdp_return_buff_slow(xdp); rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - xdp_verdict = ICE_XDP_CONSUMED; - } - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - - if (!skb) - break; - - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); - if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, - stat_err_bits))) { - dev_kfree_skb_any(skb); continue; } vlan_tci = ice_get_vlan_tci(rx_desc); - /* pad the skb if needed, to make a valid ethernet frame */ - if (eth_skb_pad(skb)) - continue; - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1507,11 +1067,13 @@ construct_skb: rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ - failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring)); if (xdp_xmit) ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu); + libeth_xdp_save_buff(&rx_ring->xdp, xdp); + if (rx_ring->ring_stats) ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 841a07bfba54..e440c55d9e9f 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -4,6 +4,8 @@ #ifndef _ICE_TXRX_H_ #define _ICE_TXRX_H_ +#include <net/libeth/types.h> + #include "ice_type.h" #define ICE_DFLT_IRQ_WORK 256 @@ -27,72 +29,6 @@ #define ICE_MAX_TXQ_PER_TXQG 128 -/* Attempt to maximize the headroom available for incoming frames. We use a 2K - * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame. - * This leaves us with 512 bytes of room. From that we need to deduct the - * space needed for the shared info and the padding needed to IP align the - * frame. - * - * Note: For cache line sizes 256 or larger this value is going to end - * up negative. In these cases we should fall back to the legacy - * receive path. - */ -#if (PAGE_SIZE < 8192) -#define ICE_2K_TOO_SMALL_WITH_PADDING \ - ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \ - SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) - -/** - * ice_compute_pad - compute the padding - * @rx_buf_len: buffer length - * - * Figure out the size of half page based on given buffer length and - * then subtract the skb_shared_info followed by subtraction of the - * actual buffer length; this in turn results in the actual space that - * is left for padding usage - */ -static inline int ice_compute_pad(int rx_buf_len) -{ - int half_page_size; - - half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); - return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len; -} - -/** - * ice_skb_pad - determine the padding that we can supply - * - * Figure out the right Rx buffer size and based on that calculate the - * padding - */ -static inline int ice_skb_pad(void) -{ - int rx_buf_len; - - /* If a 2K buffer cannot handle a standard Ethernet frame then - * optimize padding for a 3K buffer instead of a 1.5K buffer. - * - * For a 3K buffer we need to add enough padding to allow for - * tailroom due to NET_IP_ALIGN possibly shifting us out of - * cache-line alignment. - */ - if (ICE_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = ICE_RXBUF_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return ice_compute_pad(rx_buf_len); -} - -#define ICE_SKB_PAD ice_skb_pad() -#else -#define ICE_2K_TOO_SMALL_WITH_PADDING false -#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - /* We are assuming that the cache line is always 64 Bytes here for ice. * In order to make sure that is a correct assumption there is a check in probe * to print a warning if the read from GLPCI_CNF2 tells us that the cache line @@ -112,10 +48,6 @@ static inline int ice_skb_pad(void) (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define ICE_RX_DESC_UNUSED(R) \ - ((((R)->first_desc > (R)->next_to_use) ? 
0 : (R)->count) + \ - (R)->first_desc - (R)->next_to_use - 1) - #define ICE_RING_QUARTER(R) ((R)->count >> 2) #define ICE_TX_FLAGS_TSO BIT(0) @@ -197,14 +129,6 @@ struct ice_tx_offload_params { u8 header_len; }; -struct ice_rx_buf { - dma_addr_t dma; - struct page *page; - unsigned int page_offset; - unsigned int pgcnt; - unsigned int pagecnt_bias; -}; - struct ice_q_stats { u64 pkts; u64 bytes; @@ -262,15 +186,6 @@ struct ice_pkt_ctx { __be16 vlan_proto; }; -struct ice_xdp_buff { - struct xdp_buff xdp_buff; - const union ice_32b_rx_flex_desc *eop_desc; - const struct ice_pkt_ctx *pkt_ctx; -}; - -/* Required for compatibility with xdp_buffs from xsk_pool */ -static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); - /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -323,7 +238,7 @@ struct ice_tstamp_ring { struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ + struct page_pool *pp; struct net_device *netdev; /* netdev ring maps to */ struct ice_vsi *vsi; /* Backreference to associated VSI */ struct ice_q_vector *q_vector; /* Backreference to associated vector */ @@ -335,14 +250,19 @@ struct ice_rx_ring { u16 next_to_alloc; union { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_fqes; struct xdp_buff **xdp_buf; }; + /* CL2 - 2nd cacheline starts here */ + struct libeth_fqe *hdr_fqes; + struct page_pool *hdr_pp; + union { - struct ice_xdp_buff xdp_ext; - struct xdp_buff xdp; + struct libeth_xdp_buff_stash xdp; + struct libeth_xdp_buff *xsk; }; + /* CL3 - 3rd cacheline starts here */ union { struct ice_pkt_ctx pkt_ctx; @@ -352,12 +272,13 @@ struct ice_rx_ring { }; }; struct bpf_prog *xdp_prog; - u16 rx_offset; /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - u16 first_desc; + + u32 hdr_truesize; + u32 truesize; /* stats structs */ struct ice_ring_stats *ring_stats; @@ -368,12 +289,11 @@ struct ice_rx_ring { struct ice_tx_ring *xdp_ring; struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; - u16 max_frame; + u16 rx_hdr_len; u16 rx_buf_len; dma_addr_t dma; /* physical address of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; -#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) #define ICE_RX_FLAGS_MULTIDEV BIT(3) #define ICE_RX_FLAGS_RING_GCS BIT(4) @@ -422,21 +342,6 @@ struct ice_tx_ring { u16 quanta_prof_id; } ____cacheline_internodealigned_in_smp; -static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) -{ - return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); -} - -static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; -} - -static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; -} - static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring) { return !!ring->ch; @@ -491,18 +396,13 @@ struct ice_coalesce_stored { static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) { -#if (PAGE_SIZE < 8192) - if (ring->rx_buf_len > (PAGE_SIZE / 2)) - return 1; -#endif return 0; } -#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring)) - union ice_32b_rx_flex_desc; void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs); +void ice_rxq_pp_destroy(struct ice_rx_ring *rq); bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count); 
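For reference: ICE_DESC_UNUSED(), which the Rx paths above now use in place of the removed ICE_RX_DESC_UNUSED(), is the usual one-slot-reserved ring arithmetic. A minimal standalone sketch with illustrative values (not part of the patch):

#include <stdio.h>

/* Mirror of ICE_DESC_UNUSED(): one descriptor is always left unused so a
 * full ring and an empty ring can be told apart, hence the trailing "- 1".
 */
static unsigned short desc_unused(unsigned short count, unsigned short ntu,
				  unsigned short ntc)
{
	return (unsigned short)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
}

int main(void)
{
	/* count=512, next_to_clean=4, next_to_use=10: 512 + 4 - 10 - 1 = 505 */
	printf("%u\n", (unsigned int)desc_unused(512, 10, 4));
	/* count=512, next_to_clean=300, next_to_use=10: 300 - 10 - 1 = 289 */
	printf("%u\n", (unsigned int)desc_unused(512, 10, 300));
	return 0;
}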
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); u16 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 45cfaabc41cb..956da38d63b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -3,6 +3,7 @@ #include <linux/filter.h> #include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -230,9 +231,12 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, if (ice_is_port_repr_netdev(netdev)) ice_repr_inc_rx_stats(netdev, skb->len); + + /* __skb_push() is needed because xdp_build_skb_from_buff() + * calls eth_type_trans() + */ + __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, netdev); - } else { - skb->protocol = eth_type_trans(skb, rx_ring->netdev); } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -270,19 +274,18 @@ static void ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf, struct xdp_frame_bulk *bq) { - dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - switch (tx_buf->type) { case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, true); break; case ICE_TX_BUF_XDP_XMIT: + dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); xdp_return_frame_bulk(tx_buf->xdpf, bq); break; } + dma_unmap_len_set(tx_buf, len, 0); tx_buf->type = ICE_TX_BUF_EMPTY; } @@ -377,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf; u32 cnt = xdp_ring->count; void *data = xdp->data; + struct page *page; u32 nr_frags = 0; u32 free_space; u32 frag = 0; + u32 offset; free_space = ICE_DESC_UNUSED(xdp_ring); if (free_space < ICE_RING_QUARTER(xdp_ring)) @@ -399,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_head = &xdp_ring->tx_buf[ntu]; tx_buf = tx_head; + page = virt_to_page(data); + offset = offset_in_page(xdp->data); + for (;;) { dma_addr_t dma; - dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) - goto dma_unmap; - - /* record length, and DMA address */ - dma_unmap_len_set(tx_buf, len, size); - dma_unmap_addr_set(tx_buf, dma, dma); - if (frame) { + dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_unmap; tx_buf->type = ICE_TX_BUF_FRAG; } else { + dma = page_pool_get_dma_addr(page) + offset; + dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL); tx_buf->type = ICE_TX_BUF_XDP_TX; tx_buf->raw_buf = data; } + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + tx_desc->buf_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); @@ -430,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_desc = ICE_TX_DESC(xdp_ring, ntu); tx_buf = &xdp_ring->tx_buf[ntu]; + page = skb_frag_page(&sinfo->frags[frag]); + offset = skb_frag_off(&sinfo->frags[frag]); data = skb_frag_address(&sinfo->frags[frag]); size = skb_frag_size(&sinfo->frags[frag]); frag++; @@ -514,10 +525,13 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, */ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct 
libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; - *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, - xdp_ext->pkt_ctx); + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc, + &rx_ring->pkt_ctx); if (!*ts_ns) return -ENODATA; @@ -545,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; - *hash = ice_get_rx_hash(xdp_ext->eop_desc); - *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + *hash = ice_get_rx_hash(xdp_ext->desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc); if (!likely(*hash)) return -ENODATA; @@ -567,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, u16 *vlan_tci) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; + + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); - *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + *vlan_proto = rx_ring->pkt_ctx.vlan_proto; if (!*vlan_proto) return -ENODATA; - *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + *vlan_tci = ice_get_vlan_tci(xdp_ext->desc); if (!*vlan_tci) return -ENODATA; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 99717730f21a..6a3f10f7a53f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -135,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, void ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); -static inline void -ice_xdp_meta_set_desc(struct xdp_buff *xdp, - union ice_32b_rx_flex_desc *eop_desc) -{ - struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, - xdp_buff); - - xdp_ext->eop_desc = eop_desc; -} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index b0a1b67071c5..6a2ec8389a8f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1063,6 +1063,7 @@ struct ice_hw_port_stats { u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ + u64 rx_len_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index b00708907176..7a9c75d1d07c 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -53,6 +53,46 @@ struct ice_mdd_vf_events { u16 last_printed; }; +enum ice_hash_ip_ctx_type { + ICE_HASH_IP_CTX_IP = 0, + ICE_HASH_IP_CTX_IP_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP, + ICE_HASH_IP_CTX_IP_AH, + ICE_HASH_IP_CTX_IP_PFCP, + ICE_HASH_IP_CTX_IP_UDP, + ICE_HASH_IP_CTX_IP_TCP, + ICE_HASH_IP_CTX_IP_SCTP, + ICE_HASH_IP_CTX_MAX, +}; + +struct ice_vf_hash_ip_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX]; +}; + +enum ice_hash_gtpu_ctx_type { + ICE_HASH_GTPU_CTX_EH_IP = 0, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + 
ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + ICE_HASH_GTPU_CTX_MAX, +}; + +struct ice_vf_hash_gtpu_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; +}; + +struct ice_vf_hash_ctx { + struct ice_vf_hash_ip_ctx v4; + struct ice_vf_hash_ip_ctx v6; + struct ice_vf_hash_gtpu_ctx ipv4; + struct ice_vf_hash_gtpu_ctx ipv6; +}; + /* Structure to store fdir fv entry */ struct ice_fdir_prof_info { struct ice_parser_profile prof; @@ -66,6 +106,12 @@ struct ice_vf_qs_bw { u8 tc; }; +/* Structure to store RSS field vector entry */ +struct ice_rss_prof_info { + struct ice_parser_profile prof; + bool symm; +}; + /* VF operations */ struct ice_vf_ops { enum ice_disq_rst_src reset_type; @@ -106,6 +152,8 @@ struct ice_vf { u16 ctrl_vsi_idx; struct ice_vf_fdir fdir; struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; + struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS]; + struct ice_vf_hash_ctx hash_ctx; u64 rss_hashcfg; /* RSS hash configuration */ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 575fd48f485f..989ff1fd9110 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -3,6 +3,7 @@ #include <linux/bpf_trace.h> #include <linux/unroll.h> +#include <net/libeth/xdp.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" @@ -169,50 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) * If allocation was successful, substitute buffer with allocated one. * Returns 0 on success, negative on failure */ -static int +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) { - size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) : - sizeof(*rx_ring->rx_buf); - void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); - - if (!sw_ring) - return -ENOMEM; - if (pool_present) { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - rx_ring->xdp_buf = sw_ring; + rx_ring->xdp_buf = kcalloc(rx_ring->count, + sizeof(*rx_ring->xdp_buf), + GFP_KERNEL); + if (!rx_ring->xdp_buf) + return -ENOMEM; } else { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - rx_ring->rx_buf = sw_ring; - } - - return 0; -} - -/** - * ice_realloc_zc_buf - reallocate XDP ZC queue pairs - * @vsi: Current VSI - * @zc: is zero copy set - * - * Reallocate buffer for rx_rings that might be used by XSK. - * XDP requires more memory, than rx_buf provides. 
- * Returns 0 on success, negative on failure - */ -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) -{ - struct ice_rx_ring *rx_ring; - uint i; - - ice_for_each_rxq(vsi, i) { - rx_ring = vsi->rx_rings[i]; - if (!rx_ring->xsk_pool) - continue; - - if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) - return -ENOMEM; } return 0; @@ -228,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) */ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { + struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; bool if_running, pool_present = !!pool; int ret = 0, pool_failure = 0; @@ -241,8 +211,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ice_is_xdp_ena_vsi(vsi); if (if_running) { - struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; - ret = ice_qp_dis(vsi, qid); if (ret) { netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); @@ -303,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; - /* Put private info that changes on a per-packet basis - * into xdp_buff_xsk->cb. - */ - ice_xdp_meta_set_desc(*xdp, rx_desc); - rx_desc++; xdp++; } @@ -393,69 +356,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, } /** - * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer - * @rx_ring: Rx ring - * @xdp: Pointer to XDP buffer - * - * This function allocates a new skb from a zero-copy Rx buffer. - * - * Returns the skb on success, NULL on failure. - */ -static struct sk_buff * -ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int totalsize = xdp->data_end - xdp->data_meta; - unsigned int metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - struct sk_buff *skb; - u32 nr_frags = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - net_prefetch(xdp->data_meta); - - skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); - if (unlikely(!skb)) - return NULL; - - memcpy(__skb_put(skb, totalsize), xdp->data_meta, - ALIGN(totalsize, sizeof(long))); - - if (metasize) { - skb_metadata_set(skb, metasize); - __skb_pull(skb, metasize); - } - - if (likely(!xdp_buff_has_frags(xdp))) - goto out; - - for (int i = 0; i < nr_frags; i++) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - skb_frag_t *frag = &sinfo->frags[i]; - struct page *page; - void *addr; - - page = dev_alloc_page(); - if (!page) { - dev_kfree_skb(skb); - return NULL; - } - addr = page_to_virt(page); - - memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); - - __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, - addr, 0, skb_frag_size(frag)); - } - -out: - xsk_buff_free(xdp); - return skb; -} - -/** * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ * @xdp_ring: XDP Tx ring * @xsk_pool: AF_XDP buffer pool pointer @@ -669,10 +569,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, struct xsk_buff_pool *xsk_pool, int budget) { + struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk; unsigned int total_rx_bytes = 0, total_rx_packets = 0; u32 ntc = rx_ring->next_to_clean; u32 ntu = rx_ring->next_to_use; - struct xdp_buff *first = NULL; struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; @@ -686,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, xdp_prog = READ_ONCE(rx_ring->xdp_prog); xdp_ring = rx_ring->xdp_ring; - if (ntc != rx_ring->first_desc) - 
first = *ice_xdp_buf(rx_ring, rx_ring->first_desc); - while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; @@ -724,15 +621,17 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, first = xdp; } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { xsk_buff_free(first); - break; + first = NULL; } if (++ntc == cnt) ntc = 0; - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first)) continue; + ((struct libeth_xdp_buff *)first)->desc = rx_desc; + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring, xsk_pool); if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { @@ -740,7 +639,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, } else if (xdp_res == ICE_XDP_EXIT) { failure = true; first = NULL; - rx_ring->first_desc = ntc; break; } else if (xdp_res == ICE_XDP_CONSUMED) { xsk_buff_free(first); @@ -752,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, total_rx_packets++; first = NULL; - rx_ring->first_desc = ntc; continue; construct_skb: /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, first); + skb = xdp_build_skb_from_zc(first); if (!skb) { + xsk_buff_free(first); + first = NULL; + rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - break; + continue; } first = NULL; - rx_ring->first_desc = ntc; - - if (eth_skb_pad(skb)) { - skb = NULL; - continue; - } total_rx_bytes += skb->len; total_rx_packets++; @@ -781,7 +675,9 @@ construct_skb: } rx_ring->next_to_clean = ntc; - entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); + rx_ring->xsk = (struct libeth_xdp_buff *)first; + + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, entries_to_alloc); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 600cbeeaa203..5275fcedc9e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -22,7 +22,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool); -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present); void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid); void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, @@ -77,8 +77,8 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } static inline int -ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi, - bool __always_unused zc) +ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, + bool __always_unused pool_present) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c index 370f6ec2a374..7928f4e8e788 100644 --- a/drivers/net/ethernet/intel/ice/virt/queues.c +++ b/drivers/net/ethernet/intel/ice/virt/queues.c @@ -842,18 +842,17 @@ int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) goto error_param; - ring->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) goto error_param; - 
ring->max_frame = qpi->rxq.max_pkt_size; + vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation */ if (ice_vf_is_port_vlan_ena(vf)) - ring->max_frame += VLAN_HLEN; + vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c index cbdbb32d512b..085e69ec0cfc 100644 --- a/drivers/net/ethernet/intel/ice/virt/rss.c +++ b/drivers/net/ethernet/intel/ice/virt/rss.c @@ -36,6 +36,11 @@ static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP}, {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH}, {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION}, + {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC}, + {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE}, }; struct ice_vc_hash_field_match_type { @@ -87,8 +92,125 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + 
{VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + ICE_FLOW_HASH_IPV4}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, {VIRTCHNL_PROTO_HDR_IPV6, 
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), @@ -110,6 +232,35 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), + ICE_FLOW_HASH_IPV6_PRE64}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + ICE_FLOW_HASH_IPV6_PRE64 | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, {VIRTCHNL_PROTO_HDR_TCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, @@ -120,6 +271,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), ICE_FLOW_HASH_TCP_PORT}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + ICE_FLOW_HASH_TCP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_UDP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, @@ -130,6 +300,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), ICE_FLOW_HASH_UDP_PORT}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 
+ {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + ICE_FLOW_HASH_UDP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_SCTP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, @@ -140,6 +329,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), ICE_FLOW_HASH_SCTP_PORT}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + ICE_FLOW_HASH_SCTP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_PPPOE, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, @@ -155,8 +363,54 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, + {VIRTCHNL_PROTO_HDR_GTPC, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID), + BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)}, }; +static int +ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type) +{ + struct ice_vsi_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + /* clear previous hash_type */ + ctx->info.q_opt_rss = vsi->info.q_opt_rss & + ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + /* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ */ + ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, + hash_type); + + /* Preserve existing queueing option setting */ + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_flags; + + ctx->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + + ret = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (ret) { + dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n", + ret, libie_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + } + + kfree(ctx); + + return ret; +} + /** * ice_vc_validate_pattern * @vf: pointer to the VF info @@ -271,6 +525,11 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, const struct ice_vc_hash_field_match_type *hf_list; const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; + bool outer_ipv4 = false; + bool outer_ipv6 = false; + bool inner_hdr = false; + bool has_gre = false; + u32 *addl_hdrs = &hash_cfg->addl_hdrs; u64 *hash_flds = 
&hash_cfg->hash_flds; @@ -290,17 +549,17 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr = &rss_cfg->proto_hdrs.proto_hdr[i]; - bool hdr_found = false; + u32 hdr_found = 0; int j; - /* Find matched ice headers according to virtchnl headers. */ + /* Find matched ice headers according to virtchnl headers. + * Also figure out the outer type of GTPU headers. + */ for (j = 0; j < hdr_list_len; j++) { struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; - if (proto_hdr->type == hdr_map.vc_hdr) { - *addl_hdrs |= hdr_map.ice_hdr; - hdr_found = true; - } + if (proto_hdr->type == hdr_map.vc_hdr) + hdr_found = hdr_map.ice_hdr; } if (!hdr_found) @@ -318,8 +577,98 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, break; } } + + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr) + outer_ipv4 = true; + else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 && + !inner_hdr) + outer_ipv6 = true; + /* for GRE and L2TPv2, take inner header as input set if no + * field is selected from outer headers. + * for GTPU, take inner header and GTPU teid as input set. + */ + else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN || + proto_hdr->type == + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) || + ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) && + *hash_flds == 0)) { + /* set inner_hdr flag, and clean up outer header */ + inner_hdr = true; + + /* clear outer headers */ + *addl_hdrs = 0; + + if (outer_ipv4 && outer_ipv6) + return false; + + if (outer_ipv4) + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; + else if (outer_ipv6) + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; + else + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS; + + if (has_gre && outer_ipv4) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE; + if (has_gre && outer_ipv6) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE; + + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) + has_gre = true; + } + + *addl_hdrs |= hdr_found; + + /* refine hash hdrs and fields for IP fragment */ + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID); + } + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID); + } + } + + /* refine gtpu header if we take outer as input set for a gtpu + * flow with no inner ip. + */ + if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS && + *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) { + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP); + *addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP; } /* refine hash field for esp and nat-t-esp.
*/ + if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) && + (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) { + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP); + *addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP; + *hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI); + } + + /* refine hash hdrs for L4 udp/tcp/sctp. */ + if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_SCTP) && + *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER) + *addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER; + return true; } @@ -337,6 +686,874 @@ static bool ice_vf_adv_rss_offload_ena(u32 caps) } /** + * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid + * @cfg: RSS hash configuration to test + * + * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise. + */ +static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) +{ + return cfg->hash_flds && cfg->addl_hdrs; +} + +/** + * ice_hash_cfg_reset - Reset an RSS hash context + * @cfg: RSS hash configuration to reset + * + * Reset fields of @cfg that store the active rule information. + */ +static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg) +{ + cfg->hash_flds = 0; + cfg->addl_hdrs = 0; + cfg->hdr_type = ICE_RSS_OUTER_HEADERS; + cfg->symm = 0; +} + +/** + * ice_hash_cfg_record - Record an RSS hash context + * @ctx: destination (global) RSS hash configuration + * @cfg: source RSS hash configuration to record + * + * Copy the active rule information from @cfg into @ctx. + */ +static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx, + struct ice_rss_hash_cfg *cfg) +{ + ctx->hash_flds = cfg->hash_flds; + ctx->addl_hdrs = cfg->addl_hdrs; + ctx->hdr_type = cfg->hdr_type; + ctx->symm = cfg->symm; +} + +/** + * ice_hash_moveout - Delete an RSS configuration (keep context) + * @vf: VF pointer + * @cfg: RSS hash configuration + * + * Return: 0 on success (including when already absent); -ENOENT if @cfg is + * invalid or VSI is missing; -EBUSY on hardware removal failure. + */ +static int +ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_hw *hw = &vf->pf->hw; + int ret; + + if (!ice_is_hash_cfg_valid(cfg) || !vsi) + return -ENOENT; + + ret = ice_rem_rss_cfg(hw, vsi->idx, cfg); + if (ret && ret != -ENOENT) { + dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n", + vf->vf_id, vf->lan_vsi_idx, ret); + return -EBUSY; + } + + return 0; +} + +/** + * ice_hash_moveback - Add an RSS hash configuration for a VF + * @vf: VF pointer + * @cfg: RSS hash configuration to apply + * + * Add @cfg to @vf if the context is valid and VSI exists; programs HW. 
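+ * Counterpart of ice_hash_moveout(), which deletes the rule from hardware + * while keeping the recorded context so it can be re-programmed here.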
+ * + * Return: + * * 0 on success + * * -ENOENT if @cfg is invalid or VSI is missing + * * -EBUSY if hardware programming fails + */ +static int +ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_hw *hw = &vf->pf->hw; + int ret; + + if (!ice_is_hash_cfg_valid(cfg) || !vsi) + return -ENOENT; + + ret = ice_add_rss_cfg(hw, vsi, cfg); + if (ret) { + dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n", + vf->vf_id, vf->lan_vsi_idx, ret); + return -EBUSY; + } + + return 0; +} + +/** + * ice_hash_remove - remove an RSS configuration + * @vf: pointer to the VF info + * @cfg: pointer to the RSS hash configuration + * + * This function will delete an RSS hash configuration and also delete the + * hash context which stores the rule info. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int +ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + int ret; + + ret = ice_hash_moveout(vf, cfg); + if (ret && ret != -ENOENT) + return ret; + + ice_hash_cfg_reset(cfg); + + return 0; +} + +struct ice_gtpu_ctx_action { + u32 ctx_idx; + const u32 *remove_list; + int remove_count; + const u32 *moveout_list; + int moveout_count; +}; + +/** + * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration + * @vf: pointer to the VF info + * @ctx: pointer to the context of the GTPU hash + * @ctx_idx: index of the hash context + * + * Pre-processes the GTPU hash configuration before adding a new + * hash context. It removes or reorders existing hash configurations that may + * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is + * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due + * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule + * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is + * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to + * avoid conflict.
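+ * For example, per the actions[] table below, configuring + * ICE_HASH_GTPU_CTX_EH_IP removes the EH_IP_UDP/EH_IP_TCP contexts and all + * six UP/DWN contexts, while configuring ICE_HASH_GTPU_CTX_UP_IP removes only + * UP_IP_UDP/UP_IP_TCP and moves the three EH contexts out so they can be + * moved back once the new rule has been written.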
+ * + * Return: 0 on success or a negative error code on failure + */ +static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf, + struct ice_vf_hash_gtpu_ctx *ctx, + u32 ctx_idx) +{ + int ret, i; + + static const u32 remove_eh_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP, + ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + + static const u32 remove_eh_ip_udp[] = { + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + }; + static const u32 moveout_eh_ip_udp[] = { + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + + static const u32 remove_eh_ip_tcp[] = { + ICE_HASH_GTPU_CTX_UP_IP_TCP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + static const u32 moveout_eh_ip_tcp[] = { + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + }; + + static const u32 remove_up_ip[] = { + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + }; + static const u32 moveout_up_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const u32 moveout_up_ip_udp_tcp[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const u32 remove_dw_ip[] = { + ICE_HASH_GTPU_CTX_DW_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + static const u32 moveout_dw_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const struct ice_gtpu_ctx_action actions[] = { + { ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip, + ARRAY_SIZE(remove_eh_ip), NULL, 0 }, + { ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp, + ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp, + ARRAY_SIZE(moveout_eh_ip_udp) }, + { ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp, + ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp, + ARRAY_SIZE(moveout_eh_ip_tcp) }, + { ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip, + ARRAY_SIZE(remove_up_ip), moveout_up_ip, + ARRAY_SIZE(moveout_up_ip) }, + { ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp, + ARRAY_SIZE(moveout_up_ip_udp_tcp) }, + { ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp, + ARRAY_SIZE(moveout_up_ip_udp_tcp) }, + { ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip, + ARRAY_SIZE(remove_dw_ip), moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + { ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + { ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + }; + + for (i = 0; i < ARRAY_SIZE(actions); i++) { + if (actions[i].ctx_idx != ctx_idx) + continue; + + if (actions[i].remove_list) { + for (int j = 0; j < actions[i].remove_count; j++) { + u16 rm = actions[i].remove_list[j]; + + ret = ice_hash_remove(vf, &ctx->ctx[rm]); + if (ret && ret != -ENOENT) + return ret; + } + } + + if (actions[i].moveout_list) { + for (int j = 0; j < actions[i].moveout_count; j++) { + u16 mv = actions[i].moveout_list[j]; + + ret = ice_hash_moveout(vf, &ctx->ctx[mv]); + if (ret && ret != -ENOENT) + return ret; + } + } + break; + } + + return 0; +} + +/** + * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration + * @vf: VF pointer + * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP) + * + * Remove covered/recorded IP RSS configurations prior to adding a new one. 
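+ * Only the L4 contexts at indices 1 through ICE_HASH_IP_CTX_MAX - 1 are + * removed; the plain-IP context at index 0 (ICE_HASH_IP_CTX_IP) is kept.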
+ * + * Return: 0 on success; negative error code on failure. + */ +static int +ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx) +{ + int i, ret; + + for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++) + if (ice_is_hash_cfg_valid(&ctx->ctx[i])) { + ret = ice_hash_remove(vf, &ctx->ctx[i]); + if (ret) + return ret; + } + + return 0; +} + +/** + * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index + * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_* + * + * Determine the GTPU hash context index based on the combination of + * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport + * protocols (UDP, TCP) within IPv4 or IPv6 flows. + * + * Return: A valid context index (0-8) if the header combination is supported, + * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid. + */ +static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs) +{ + u32 eh_idx, ip_idx; + + if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) + eh_idx = 0; + else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) + eh_idx = 1; + else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) + eh_idx = 2; + else + return ICE_HASH_GTPU_CTX_MAX; + + ip_idx = 0; + if (hdrs & ICE_FLOW_SEG_HDR_UDP) + ip_idx = 1; + else if (hdrs & ICE_FLOW_SEG_HDR_TCP) + ip_idx = 2; + + if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)) + return eh_idx * 3 + ip_idx; + else + return ICE_HASH_GTPU_CTX_MAX; +} + +/** + * ice_map_ip_ctx_idx - map the index of the IP L4 hash context + * @hdrs: protocol headers prefixed with ICE_FLOW_SEG_HDR_XXX. + * + * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with + * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP; + * this function maps the index based on the protocol headers. + * + * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX + * if no matching context is found.
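+ * e.g., ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | ICE_FLOW_SEG_HDR_IPV4 | + * ICE_FLOW_SEG_HDR_UDP maps to ICE_HASH_IP_CTX_IP_UDP in the table below.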
+ */ +static u8 ice_map_ip_ctx_idx(u32 hdrs) +{ + u8 i; + + static struct { + u32 hdrs; + u8 ctx_idx; + } ip_ctx_idx_map[] = { + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_ESP, + ICE_HASH_IP_CTX_IP_ESP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_NAT_T_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_AH, + ICE_HASH_IP_CTX_IP_AH }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_PFCP_SESSION, + ICE_HASH_IP_CTX_IP_PFCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_ESP, + ICE_HASH_IP_CTX_IP_ESP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_NAT_T_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_AH, + ICE_HASH_IP_CTX_IP_AH }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_PFCP_SESSION, + ICE_HASH_IP_CTX_IP_PFCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + /* the remaining mappings are used for default RSS */ + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + }; + + for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) { + if (hdrs == ip_ctx_idx_map[i].hdrs) + return ip_ctx_idx_map[i].ctx_idx; + } + + return ICE_HASH_IP_CTX_MAX; +} + +/** + * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF + * @vf: pointer to the VF structure + * @cfg: pointer to the RSS hash configuration + * + * Prepare the RSS hash context for a given VF based on the additional + * protocol headers specified in @cfg. This includes pre-configuration + * for IP and GTPU-based flows. + * + * If the configuration matches a known IP context, the function sets up + * the appropriate IP hash context. If the configuration includes GTPU + * headers, it prepares the GTPU-specific context accordingly. + * + * Return: 0 on success, or a negative error code on failure. 
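+ *
+ * Sketch of the overall add sequence this pre-step belongs to (see
+ * ice_add_rss_cfg_wrap() below):
+ *
+ *	if (ice_add_rss_cfg_pre(vf, cfg))
+ *		return -EINVAL;
+ *	ret = ice_add_rss_cfg(hw, vsi, cfg);
+ *	if (!ret && ice_add_rss_cfg_post(vf, cfg))
+ *		ret = -EINVAL;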
+ */ +static int +ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs); + u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs); + + if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) { + int ret = 0; + + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) + ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4); + else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) + ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6); + + if (ret) + return ret; + } + + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) { + return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4, + ice_gtpu_ctx_idx); + } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) { + return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6, + ice_gtpu_ctx_idx); + } + + return 0; +} + +/** + * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration + * @vf: pointer to the VF info + * @ctx: pointer to the context of the GTPU hash + * @cfg: pointer to the RSS hash configuration + * @ctx_idx: index of the hash context + * + * Post-processes the GTPU hash configuration after a new hash + * context has been successfully added. It updates the context with the new + * configuration and restores any previously removed hash contexts that need + * to be re-applied. This ensures proper TCAM rule ordering and avoids + * conflicts between overlapping GTPU rules. + * + * Return: 0 on success or a negative error code on failure + */ +static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf, + struct ice_vf_hash_gtpu_ctx *ctx, + struct ice_rss_hash_cfg *cfg, u32 ctx_idx) +{ + /* GTPU hash moveback lookup table indexed by context ID. + * Each entry is a bitmap indicating which contexts need moveback + * operations when the corresponding context index is processed. 
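+ *
+ * Each table entry packs the dependent context IDs into a single word,
+ * so re-applying displaced rules is a plain bitmap walk (sketch of the
+ * loop below):
+ *
+ *	unsigned long m = BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ *			  BIT(ICE_HASH_GTPU_CTX_DW_IP);
+ *	for_each_set_bit(i, &m, ICE_HASH_GTPU_CTX_MAX)
+ *		ice_hash_moveback(vf, &ctx->ctx[i]);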
+ */
+	static const unsigned long
+		ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
+		[ICE_HASH_GTPU_CTX_EH_IP] = 0,
+		[ICE_HASH_GTPU_CTX_EH_IP_UDP] =
+			BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+			BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
+			BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+			BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
+		[ICE_HASH_GTPU_CTX_EH_IP_TCP] =
+			BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+			BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+			BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
+		[ICE_HASH_GTPU_CTX_UP_IP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+		[ICE_HASH_GTPU_CTX_UP_IP_UDP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+		[ICE_HASH_GTPU_CTX_UP_IP_TCP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+		[ICE_HASH_GTPU_CTX_DW_IP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+		[ICE_HASH_GTPU_CTX_DW_IP_UDP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+		[ICE_HASH_GTPU_CTX_DW_IP_TCP] =
+			BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+			BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+	};
+	unsigned long moveback_mask;
+	int ret;
+	int i;
+
+	if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
+		return 0;
+
+	ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
+	ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
+	ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
+	ctx->ctx[ctx_idx].symm = cfg->symm;
+
+	moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
+	for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
+		ret = ice_hash_moveback(vf, &ctx->ctx[i]);
+		if (ret && ret != -ENOENT)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+			ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
+		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+			ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
+	}
+
+	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
+						 cfg, ice_gtpu_ctx_idx);
+	} else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+		return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
+						 cfg, ice_gtpu_ctx_idx);
+	}
+
+	return 0;
+}
+
+/**
+ * ice_rem_rss_cfg_post - post-process the RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Post-process the RSS hash configuration after deleting a hash
+ * config, e.g. by resetting the recorded hash context for the GTPU hash.
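+ *
+ * Concretely (illustrative), removing an IPv4 GTPU-EH/UDP rule clears
+ * the recorded slot so that a later add starts from a clean state:
+ *
+ *	ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);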
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+			ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
+		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+			ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
+	}
+
+	if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+		return;
+
+	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+		ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
+	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+		ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
+}
+
+/**
+ * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Wrapper function to delete a flow profile based on an RSS configuration,
+ * and also post-process the hash context based on the rollback mechanism
+ * that ice_add_rss_cfg_wrap uses to handle rule conflicts.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+	struct ice_hw *hw = &vf->pf->hw;
+	int ret;
+
+	ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+	/* We just ignore -ENOENT, because if two configurations share the same
+	 * profile, removing one of them actually removes both, since the
+	 * profile is deleted.
+	 */
+	if (ret && ret != -ENOENT) {
+		dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+			vf->vf_id, vf->lan_vsi_idx, ret);
+		return ret;
+	}
+
+	ice_rem_rss_cfg_post(vf, cfg);
+
+	return 0;
+}
+
+/**
+ * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Add a flow profile based on an RSS configuration. Use a rollback
+ * mechanism to handle rule conflicts caused by the top-to-bottom TCAM
+ * write sequence.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+	struct ice_hw *hw = &vf->pf->hw;
+	int ret;
+
+	if (ice_add_rss_cfg_pre(vf, cfg))
+		return -EINVAL;
+
+	ret = ice_add_rss_cfg(hw, vsi, cfg);
+	if (ret) {
+		dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+			vf->vf_id, vf->lan_vsi_idx, ret);
+		return ret;
+	}
+
+	if (ice_add_rss_cfg_post(vf, cfg))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/**
+ * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
+ * @vf: pointer to the VF info
+ * @proto: pointer to the virtchnl protocol header
+ * @raw_cfg: pointer to the RSS raw pattern configuration
+ *
+ * Parser function to get spec and mask from virtchnl message, and parse
+ * them to get the corresponding profile and offset. The profile is used
+ * to add RSS configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
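+ *
+ * The parse pipeline, in call order (sketch of the body below):
+ *
+ *	psr = ice_parser_create(hw);
+ *	ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
+ *	ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
+ *				pkt_len, ICE_BLK_RSS, &prof);
+ *	ice_parser_destroy(psr);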
+ */ +static int +ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, + struct ice_rss_raw_cfg *raw_cfg) +{ + struct ice_parser_result pkt_parsed; + struct ice_hw *hw = &vf->pf->hw; + struct ice_parser_profile prof; + struct ice_parser *psr; + u8 *pkt_buf, *msk_buf; + u16 pkt_len; + int ret = 0; + + pkt_len = proto->raw.pkt_len; + if (!pkt_len) + return -EINVAL; + if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) + pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET; + + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) { + ret = -ENOMEM; + goto free_alloc; + } + + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len); + + psr = ice_parser_create(hw); + if (IS_ERR(psr)) { + ret = PTR_ERR(psr); + goto free_alloc; + } + + ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed); + if (ret) + goto parser_destroy; + + ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf, + pkt_len, ICE_BLK_RSS, &prof); + if (ret) + goto parser_destroy; + + memcpy(&raw_cfg->prof, &prof, sizeof(prof)); + +parser_destroy: + ice_parser_destroy(psr); +free_alloc: + kfree(pkt_buf); + kfree(msk_buf); + return ret; +} + +/** + * ice_add_raw_rss_cfg - add RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function adds the RSS configuration for raw pattern. + * Check if current profile is matched. If not, remove the old + * one and add the new profile to HW directly. Update the symmetric + * hash configuration as well. + * + * Return: 0 on success; negative error code on failure. + */ +static int +ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_rss_prof_info *rss_prof; + struct ice_hw *hw = &vf->pf->hw; + int i, ptg, ret = 0; + u16 vsi_handle; + u64 id; + + vsi_handle = vf->lan_vsi_idx; + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + rss_prof = &vf->rss_prof_info[ptg]; + + /* check if ptg already has a profile */ + if (rss_prof->prof.fv_num) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + if (rss_prof->prof.fv[i].proto_id != + prof->fv[i].proto_id || + rss_prof->prof.fv[i].offset != + prof->fv[i].offset) + break; + } + + /* current profile is matched, check symmetric hash */ + if (i == ICE_MAX_FV_WORDS) { + if (rss_prof->symm != cfg->symm) + goto update_symm; + return ret; + } + + /* current profile is not matched, remove it */ + ret = + ice_rem_prof_id_flow(hw, ICE_BLK_RSS, + ice_get_hw_vsi_num(hw, vsi_handle), + id); + if (ret) { + dev_err(dev, "remove RSS flow failed\n"); + return ret; + } + + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (ret) { + dev_err(dev, "remove RSS profile failed\n"); + return ret; + } + } + + /* add new profile */ + ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS); + if (ret) { + dev_err(dev, "HW profile add failed\n"); + return ret; + } + + memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile)); + +update_symm: + rss_prof->symm = cfg->symm; + ice_rss_update_raw_symm(hw, cfg, id); + return ret; +} + +/** + * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function removes the RSS configuration for raw pattern. + * Check if vsi group is already removed first. 
If not, remove the + * profile. + * + * Return: 0 on success; negative error code on failure. + */ +static int +ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_hw *hw = &vf->pf->hw; + int ptg, ret = 0; + u16 vsig, vsi; + u64 id; + + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + + memset(&vf->rss_prof_info[ptg], 0, + sizeof(struct ice_rss_prof_info)); + + /* check if vsig is already removed */ + vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx); + if (vsi >= ICE_MAX_VSI) { + ret = -EINVAL; + goto err; + } + + vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig; + if (vsig) { + ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id); + if (ret) + goto err; + + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (ret) + goto err; + } + + return ret; + +err: + dev_err(dev, "HW profile remove failed\n"); + return ret; +} + +/** * ice_vc_handle_rss_cfg * @vf: pointer to the VF info * @msg: pointer to the message buffer @@ -352,6 +1569,9 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) struct device *dev = ice_pf_to_dev(vf->pf); struct ice_hw *hw = &vf->pf->hw; struct ice_vsi *vsi; + u8 hash_type; + bool symm; + int ret; if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", @@ -387,49 +1607,44 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } - if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + if (ret) + v_ret = ice_err_to_virt_err(ret); goto error_param; } - if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { - struct ice_vsi_ctx *ctx; - u8 lut_type, hash_type; - int status; + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + if (ret) { + v_ret = ice_err_to_virt_err(ret); + goto error_param; + } - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : - ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + /* Configure RSS hash for raw pattern */ + if (rss_cfg->proto_hdrs.tunnel_level == 0 && + rss_cfg->proto_hdrs.count == 0) { + struct ice_rss_raw_cfg raw_cfg; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs, + &raw_cfg)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ctx->info.q_opt_rss = - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); - - /* Preserve existing queueing option setting */ - ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & - ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); - ctx->info.q_opt_tc = vsi->info.q_opt_tc; - ctx->info.q_opt_flags = vsi->info.q_opt_rss; - - ctx->info.valid_sections = - cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); - - status = ice_update_vsi(hw, vsi->idx, ctx, NULL); - if (status) { - dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", - status, libie_aq_str(hw->adminq.sq_last_status)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (add) { + raw_cfg.symm = symm; + if (ice_add_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { - vsi->info.q_opt_rss = ctx->info.q_opt_rss; + if (ice_rem_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - - kfree(ctx); } else { struct ice_rss_hash_cfg cfg; @@ -448,24 +1663,12 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } if (add) { - if (ice_add_rss_cfg(hw, vsi, &cfg)) { + cfg.symm = symm; + if (ice_add_rss_cfg_wrap(vf, &cfg)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", - vsi->vsi_num, v_ret); - } } else { - int status; - - status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); - /* We just ignore -ENOENT, because if two configurations - * share the same profile remove one of them actually - * removes both, since the profile is deleted. 
- */ - if (status && status != -ENOENT) { + if (ice_rem_rss_cfg_wrap(vf, &cfg)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", - vf->vf_id, status); - } } } diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index ca4da0c89979..50fa7be0c00d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -735,12 +735,10 @@ static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) #define IDPF_CAP_RSS (\ VIRTCHNL2_FLOW_IPV4_TCP |\ - VIRTCHNL2_FLOW_IPV4_TCP |\ VIRTCHNL2_FLOW_IPV4_UDP |\ VIRTCHNL2_FLOW_IPV4_SCTP |\ VIRTCHNL2_FLOW_IPV4_OTHER |\ VIRTCHNL2_FLOW_IPV6_TCP |\ - VIRTCHNL2_FLOW_IPV6_TCP |\ VIRTCHNL2_FLOW_IPV6_UDP |\ VIRTCHNL2_FLOW_IPV6_SCTP |\ VIRTCHNL2_FLOW_IPV6_OTHER) diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index 8c46481d2e1f..7a06eaf46a08 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -3,16 +3,94 @@ #include "idpf.h" #include "idpf_devids.h" +#include "idpf_lan_vf_regs.h" #include "idpf_virtchnl.h" #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" +#define IDPF_NETWORK_ETHERNET_PROGIF 0x01 +#define IDPF_CLASS_NETWORK_ETHERNET_PROGIF \ + (PCI_CLASS_NETWORK_ETHERNET << 8 | IDPF_NETWORK_ETHERNET_PROGIF) +#define IDPF_VF_TEST_VAL 0xfeed0000u + MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_IMPORT_NS("LIBETH"); MODULE_IMPORT_NS("LIBETH_XDP"); MODULE_LICENSE("GPL"); /** + * idpf_get_device_type - Helper to find if it is a VF or PF device + * @pdev: PCI device information struct + * + * Return: PF/VF device ID or -%errno on failure. + */ +static int idpf_get_device_type(struct pci_dev *pdev) +{ + void __iomem *addr; + int ret; + + addr = ioremap(pci_resource_start(pdev, 0) + VF_ARQBAL, 4); + if (!addr) { + pci_err(pdev, "Failed to allocate BAR0 mbx region\n"); + return -EIO; + } + + writel(IDPF_VF_TEST_VAL, addr); + if (readl(addr) == IDPF_VF_TEST_VAL) + ret = IDPF_DEV_ID_VF; + else + ret = IDPF_DEV_ID_PF; + + iounmap(addr); + + return ret; +} + +/** + * idpf_dev_init - Initialize device specific parameters + * @adapter: adapter to initialize + * @ent: entry in idpf_pci_tbl + * + * Return: %0 on success, -%errno on failure. 
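+ *
+ * When the device is matched by PCI class code rather than device ID,
+ * the PF/VF split is probed at runtime by idpf_get_device_type(): a
+ * scratch value written at the VF mailbox offset is assumed to read
+ * back only on a VF (sketch):
+ *
+ *	writel(IDPF_VF_TEST_VAL, addr);
+ *	ret = readl(addr) == IDPF_VF_TEST_VAL ? IDPF_DEV_ID_VF :
+ *						IDPF_DEV_ID_PF;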
+ */ +static int idpf_dev_init(struct idpf_adapter *adapter, + const struct pci_device_id *ent) +{ + int ret; + + if (ent->class == IDPF_CLASS_NETWORK_ETHERNET_PROGIF) { + ret = idpf_get_device_type(adapter->pdev); + switch (ret) { + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + default: + return ret; + } + + return 0; + } + + switch (ent->device) { + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + default: + return -ENODEV; + } + + return 0; +} + +/** * idpf_remove - Device removal routine * @pdev: PCI device information struct */ @@ -165,21 +243,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->req_tx_splitq = true; adapter->req_rx_splitq = true; - switch (ent->device) { - case IDPF_DEV_ID_PF: - idpf_dev_ops_init(adapter); - break; - case IDPF_DEV_ID_VF: - idpf_vf_dev_ops_init(adapter); - adapter->crc_enable = true; - break; - default: - err = -ENODEV; - dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", - ent->device); - goto err_free; - } - adapter->pdev = pdev; err = pcim_enable_device(pdev); if (err) @@ -259,11 +322,18 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* setup msglvl */ adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M); + err = idpf_dev_init(adapter, ent); + if (err) { + dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", + ent->device); + goto destroy_vc_event_wq; + } + err = idpf_cfg_hw(adapter); if (err) { dev_err(dev, "Failed to configure HW structure for adapter: %d\n", err); - goto err_cfg_hw; + goto destroy_vc_event_wq; } mutex_init(&adapter->vport_ctrl_lock); @@ -284,7 +354,7 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -err_cfg_hw: +destroy_vc_event_wq: destroy_workqueue(adapter->vc_event_wq); err_vc_event_wq_alloc: destroy_workqueue(adapter->stats_wq); @@ -304,6 +374,7 @@ err_free: static const struct pci_device_id idpf_pci_tbl[] = { { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)}, { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)}, + { PCI_DEVICE_CLASS(IDPF_CLASS_NETWORK_ETHERNET_PROGIF, ~0)}, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(pci, idpf_pci_tbl); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 61dfcd8cb370..ac57212ab02b 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, true)) { - dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid); + dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index d5b1b974b4a3..3069b583fd81 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -198,7 +198,7 @@ static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. + * previous prot_autoc_read_82599. 
* * This part (82599) may need to hold a the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. @@ -1622,7 +1622,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; } - /* store source and destination IP masks (big-enian) */ + /* store source and destination IP masks (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 170a29d162c6..a1d04914fbbc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -318,7 +318,7 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly.. * @@ -492,7 +492,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 532813d8d028..244de500963e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,5 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o cn20k/mbox_init.o + rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \ + cn20k/npa.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index ec0e11c77cbf..42044cd810b1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1994,7 +1994,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) nvec = pci_msix_vec_count(cgx->pdev); err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); - if (err < 0 || err != nvec) { + if (err < 0) { dev_err(dev, "Request for %d msix vectors failed, err %d\n", nvec, err); goto err_release_regions; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c new file mode 100644 index 000000000000..498968bf4cf5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
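+ *
+ * Pretty-printers for the CN20K NIX/NPA hardware context structures,
+ * consumed by the RVU debugfs context dumpers in rvu_debugfs.c.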
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "debugfs.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+			    struct nix_cn20k_sq_ctx_s *sq_ctx)
+{
+	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+		   sq_ctx->ena, sq_ctx->qint_idx);
+	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+		   sq_ctx->substream, sq_ctx->sdp_mcast);
+	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+		   sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+		   sq_ctx->default_chan, sq_ctx->sqb_count);
+
+	seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+		   sq_ctx->sqb_aura, sq_ctx->sq_int);
+	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+		   sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
+	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+		   sq_ctx->smenq_next_sqb);
+
+	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+		   sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+		   (u64)sq_ctx->scm_lso_rem);
+	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+	seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n",
+		   (u64)sq_ctx->aged_drop_octs);
+	seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n",
+		   (u64)sq_ctx->aged_drop_pkts);
+	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+		   (u64)sq_ctx->dropped_octs);
+	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+		   (u64)sq_ctx->dropped_pkts);
+}
+
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+			    struct nix_cn20k_aq_enq_rsp *rsp)
+{
+	struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq;
+
+	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+	seq_printf(m, "W1: wrptr \t\t\t%llx\n",
(u64)cq_ctx->wrptr); + seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", + cq_ctx->avg_con, cq_ctx->cint_idx); + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", + cq_ctx->cq_err, cq_ctx->qint_idx); + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", + cq_ctx->bpid, cq_ctx->bp_ena); + + seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high); + seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med); + seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low); + seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n", + cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 | + cq_ctx->lbpid_low); + seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena); + + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", + cq_ctx->update_time, cq_ctx->avg_level); + seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", + cq_ctx->head, cq_ctx->tail); + + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); + seq_printf(m, "W3: qsize \t\t\t%d\nW3:stashing \t\t\t%d\n", + cq_ctx->qsize, cq_ctx->stashing); + + seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching); + seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac); + seq_printf(m, "W3: stash_thresh \t\t\t%d\n", + cq_ctx->stash_thresh); + + seq_printf(m, "W3: msh_valid \t\t\t%d\nW3:msh_dst \t\t\t%d\n", + cq_ctx->msh_valid, cq_ctx->msh_dst); + + seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n", + cq_ctx->cpt_drop_err_en); + seq_printf(m, "W3: ena \t\t\t%d\n", + cq_ctx->ena); + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", + cq_ctx->drop_ena, cq_ctx->drop); + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); + + seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext); + seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext); +} + +void print_npa_cn20k_aura_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + struct npa_cn20k_aura_s *aura = &rsp->aura; + + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); + + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", + aura->ena, aura->pool_caching); + seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con); + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", + aura->pool_drop_ena, aura->aura_drop_ena); + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", + aura->bp_ena, aura->aura_drop); + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", + aura->shift, aura->avg_level); + + seq_printf(m, "W2: count\t\t%llu\nW2: nix_bpid\t\t%d\n", + (u64)aura->count, aura->bpid); + + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", + (u64)aura->limit, aura->bp, aura->fc_ena); + + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", + aura->fc_up_crossing, aura->fc_stype); + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); + + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); + + seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", + aura->pool_drop, aura->update_time); + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", + aura->err_int, aura->err_int_ena); + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", + aura->thresh_int, aura->thresh_int_ena); + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", + aura->thresh_up, aura->thresh_qint_idx); + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); + + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", 
		   aura->fc_msh_dst);
+}
+
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+			      struct npa_cn20k_aq_enq_rsp *rsp)
+{
+	struct npa_cn20k_pool_s *pool = &rsp->pool;
+
+	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+		   pool->ena, pool->nat_align);
+	seq_printf(m, "W1: stack_caching\t%d\n",
+		   pool->stack_caching);
+	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+		   pool->buf_offset, pool->buf_size);
+
+	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+		   pool->stack_max_pages, pool->stack_pages);
+
+	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+		   pool->stack_offset, pool->shift, pool->avg_level);
+	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+		   pool->avg_con, pool->fc_ena, pool->fc_stype);
+	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+		   pool->fc_hyst_bits, pool->fc_up_crossing);
+	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+		   pool->err_int, pool->err_int_ena);
+	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+		   pool->thresh_int_ena, pool->thresh_up);
+	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
+		   pool->thresh_qint_idx, pool->err_qint_idx);
+	seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
new file mode 100644
index 000000000000..a2e3a2cd6edb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef DEBUGFS_H
+#define DEBUGFS_H
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../mbox.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+			    struct nix_cn20k_sq_ctx_s *sq_ctx);
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+			    struct nix_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_aura_ctx(struct seq_file *m,
+			      struct npa_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+			      struct npa_cn20k_aq_enq_rsp *rsp);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
new file mode 100644
index 000000000000..aa2016fd1bba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
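+ *
+ * CN20K NIX AQ enqueue mbox handler; the CN20K context layouts differ,
+ * but the enqueue flow is shared with the generic rvu_nix_aq_enq_inst().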
+ * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu, + struct nix_cn20k_aq_enq_req *req, + struct nix_cn20k_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, + (struct nix_aq_enq_rsp *)rsp); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c new file mode 100644 index 000000000000..fe8f926c8b75 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu, + struct npa_cn20k_aq_enq_req *req, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req, + (struct npa_aq_enq_rsp *)rsp); +} +EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h index 76ce3ec6da9c..763f6cabd7c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -8,6 +8,8 @@ #ifndef STRUCT_H #define STRUCT_H +#define NIX_MAX_CTX_SIZE 128 + /* * CN20k RVU PF MBOX Interrupt Vector Enumeration * @@ -37,4 +39,342 @@ enum rvu_af_cn20k_int_vec_e { RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, RVU_AF_CN20K_INT_VEC_CNT = 0xa, }; + +struct nix_cn20k_sq_ctx_s { + u64 ena : 1; /* W0 */ + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; + u64 smq : 11; /* W1 */ + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_weight : 14; + u64 default_chan : 12; + u64 sqb_count : 16; + u64 reserved_120_120 : 1; + u64 smq_rr_count_lb : 7; + u64 smq_rr_count_ub : 25; /* W2 */ + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 reserved_191_191 : 1; + u64 max_sqe_size : 2; /* W3 */ + u64 cq_limit : 8; + u64 lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 reserved_253_255 : 3; + u64 next_sqb : 64; /* W4 */ + u64 tail_sqb : 64; /* W5 */ + u64 smenq_sqb : 64; /* W6 */ + u64 smenq_next_sqb : 64; /* W7 */ + u64 head_sqb : 64; /* W8 */ + u64 reserved_576_583 : 8; /* W9 */ + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 reserved_630_639 : 10; + u64 scm_lso_rem : 18; /* W10 */ + u64 reserved_658_703 : 46; + u64 octs : 48; /* W11 */ + u64 reserved_752_767 : 16; + u64 pkts : 48; /* W12 */ + u64 reserved_816_831 : 16; + u64 aged_drop_octs : 32; /* W13 */ + u64 aged_drop_pkts : 32; + u64 dropped_octs : 48; /* W14 */ + u64 reserved_944_959 : 16; + u64 dropped_pkts : 48; /* W15 */ + u64 reserved_1008_1023 : 16; +}; + +static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct nix_cn20k_cq_ctx_s { + u64 base : 64; /* W0 */ + u64 lbp_ena : 1; /* W1 */ + u64 lbpid_low : 3; + u64 bp_ena : 1; + u64 lbpid_med : 3; + u64 bpid : 9; + u64 lbpid_high : 3; + u64 qint_idx : 7; + 
	u64 cq_err : 1;
+	u64 cint_idx : 7;
+	u64 avg_con : 9;
+	u64 wrptr : 20;
+	u64 tail : 20; /* W2 */
+	u64 head : 20;
+	u64 avg_level : 8;
+	u64 update_time : 16;
+	u64 bp : 8; /* W3 */
+	u64 drop : 8;
+	u64 drop_ena : 1;
+	u64 ena : 1;
+	u64 cpt_drop_err_en : 1;
+	u64 reserved_211_211 : 1;
+	u64 msh_dst : 11;
+	u64 msh_valid : 1;
+	u64 stash_thresh : 4;
+	u64 lbp_frac : 4;
+	u64 caching : 1;
+	u64 stashing : 1;
+	u64 reserved_234_235 : 2;
+	u64 qsize : 4;
+	u64 cq_err_int : 8;
+	u64 cq_err_int_ena : 8;
+	u64 bpid_ext : 2; /* W4 */
+	u64 reserved_258_259 : 2;
+	u64 lbpid_ext : 2;
+	u64 reserved_262_319 : 58;
+	u64 reserved_320_383 : 64; /* W5 */
+	u64 reserved_384_447 : 64; /* W6 */
+	u64 reserved_448_511 : 64; /* W7 */
+	u64 padding[8];
+};
+
+static_assert(sizeof(struct nix_cn20k_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct nix_cn20k_rq_ctx_s {
+	u64 ena : 1;
+	u64 sso_ena : 1;
+	u64 ipsech_ena : 1;
+	u64 ena_wqwd : 1;
+	u64 cq : 20;
+	u64 reserved_24_34 : 11;
+	u64 port_il4_dis : 1;
+	u64 port_ol4_dis : 1;
+	u64 lenerr_dis : 1;
+	u64 csum_il4_dis : 1;
+	u64 csum_ol4_dis : 1;
+	u64 len_il4_dis : 1;
+	u64 len_il3_dis : 1;
+	u64 len_ol4_dis : 1;
+	u64 len_ol3_dis : 1;
+	u64 wqe_aura : 20;
+	u64 spb_aura : 20;
+	u64 lpb_aura : 20;
+	u64 sso_grp : 10;
+	u64 sso_tt : 2;
+	u64 pb_caching : 2;
+	u64 wqe_caching : 1;
+	u64 xqe_drop_ena : 1;
+	u64 spb_drop_ena : 1;
+	u64 lpb_drop_ena : 1;
+	u64 pb_stashing : 1;
+	u64 ipsecd_drop_en : 1;
+	u64 chi_ena : 1;
+	u64 reserved_125_127 : 3;
+	u64 band_prof_id_l : 10;
+	u64 sso_fc_ena : 1;
+	u64 policer_ena : 1;
+	u64 spb_sizem1 : 6;
+	u64 wqe_skip : 2;
+	u64 spb_high_sizem1 : 3;
+	u64 spb_ena : 1;
+	u64 lpb_sizem1 : 12;
+	u64 first_skip : 7;
+	u64 reserved_171_171 : 1;
+	u64 later_skip : 6;
+	u64 xqe_imm_size : 6;
+	u64 band_prof_id_h : 4;
+	u64 reserved_188_189 : 2;
+	u64 xqe_imm_copy : 1;
+	u64 xqe_hdr_split : 1;
+	u64 xqe_drop : 8;
+	u64 xqe_pass : 8;
+	u64 wqe_pool_drop : 8;
+	u64 wqe_pool_pass : 8;
+	u64 spb_aura_drop : 8;
+	u64 spb_aura_pass : 8;
+	u64 spb_pool_drop : 8;
+	u64 spb_pool_pass : 8;
+	u64 lpb_aura_drop : 8;
+	u64 lpb_aura_pass : 8;
+	u64 lpb_pool_drop : 8;
+	u64 lpb_pool_pass : 8;
+	u64 reserved_288_291 : 4;
+	u64 rq_int : 8;
+	u64 rq_int_ena : 8;
+	u64 qint_idx : 7;
+	u64 reserved_315_319 : 5;
+	u64 ltag : 24;
+	u64 good_utag : 8;
+	u64 bad_utag : 8;
+	u64 flow_tagw : 6;
+	u64 ipsec_vwqe : 1;
+	u64 vwqe_ena : 1;
+	u64 vtime_wait : 8;
+	u64 max_vsize_exp : 4;
+	u64 vwqe_skip : 2;
+	u64 reserved_382_383 : 2;
+	u64 octs : 48;
+	u64 reserved_432_447 : 16;
+	u64 pkts : 48;
+	u64 reserved_496_511 : 16;
+	u64 drop_octs : 48;
+	u64 reserved_560_575 : 16;
+	u64 drop_pkts : 48;
+	u64 reserved_624_639 : 16;
+	u64 re_pkts : 48;
+	u64 reserved_688_703 : 16;
+	u64 reserved_704_767 : 64;
+	u64 reserved_768_831 : 64;
+	u64 reserved_832_895 : 64;
+	u64 reserved_896_959 : 64;
+	u64 reserved_960_1023 : 64;
+};
+
+static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_aura_s {
+	u64 pool_addr; /* W0 */
+	u64 ena : 1; /* W1 */
+	u64 reserved_65 : 2;
+	u64 pool_caching : 1;
+	u64 reserved_68 : 16;
+	u64 avg_con : 9;
+	u64 reserved_93 : 1;
+	u64 pool_drop_ena : 1;
+	u64 aura_drop_ena : 1;
+	u64 bp_ena : 1;
+	u64 reserved_97_103 : 7;
+	u64 aura_drop : 8;
+	u64 shift : 6;
+	u64 reserved_118_119 : 2;
+	u64 avg_level : 8;
+	u64 count : 36; /* W2 */
+	u64 reserved_164_167 : 4;
+	u64 bpid : 12;
+	u64 reserved_180_191 : 12;
+	u64 limit : 36; /* W3 */
+	u64 reserved_228_231 : 4;
+	u64 bp : 7;
+	u64 reserved_239_243 : 5;
+	u64 fc_ena : 1;
+	u64 fc_up_crossing : 1;
+	u64 fc_stype : 2;
+	u64 fc_hyst_bits : 4;
+	u64 reserved_252_255 : 4;
+	u64 fc_addr; /* W4 */
+	u64 pool_drop : 8; /* W5 */
+	u64 update_time : 16;
+	u64 err_int : 8;
+	u64 err_int_ena : 8;
+	u64 thresh_int : 1;
+	u64 thresh_int_ena : 1;
+	u64 thresh_up : 1;
+	u64 reserved_363 : 1;
+	u64 thresh_qint_idx : 7;
+	u64 reserved_371 : 1;
+	u64 err_qint_idx : 7;
+	u64 reserved_379_383 : 5;
+	u64 thresh : 36; /* W6 */
+	u64 rsvd_423_420 : 4;
+	u64 fc_msh_dst : 11;
+	u64 reserved_435_438 : 4;
+	u64 op_dpc_ena : 1;
+	u64 op_dpc_set : 5;
+	u64 reserved_445_445 : 1;
+	u64 stream_ctx : 1;
+	u64 unified_ctx : 1;
+	u64 reserved_448_511; /* W7 */
+	u64 padding[8];
+};
+
+static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_pool_s {
+	u64 stack_base; /* W0 */
+	u64 ena : 1;
+	u64 nat_align : 1;
+	u64 reserved_66_67 : 2;
+	u64 stack_caching : 1;
+	u64 reserved_69_87 : 19;
+	u64 buf_offset : 12;
+	u64 reserved_100_103 : 4;
+	u64 buf_size : 12;
+	u64 reserved_116_119 : 4;
+	u64 ref_cnt_prof : 3;
+	u64 reserved_123_127 : 5;
+	u64 stack_max_pages : 32;
+	u64 stack_pages : 32;
+	u64 bp_0 : 7;
+	u64 bp_1 : 7;
+	u64 bp_2 : 7;
+	u64 bp_3 : 7;
+	u64 bp_4 : 7;
+	u64 bp_5 : 7;
+	u64 bp_6 : 7;
+	u64 bp_7 : 7;
+	u64 bp_ena_0 : 1;
+	u64 bp_ena_1 : 1;
+	u64 bp_ena_2 : 1;
+	u64 bp_ena_3 : 1;
+	u64 bp_ena_4 : 1;
+	u64 bp_ena_5 : 1;
+	u64 bp_ena_6 : 1;
+	u64 bp_ena_7 : 1;
+	u64 stack_offset : 4;
+	u64 reserved_260_263 : 4;
+	u64 shift : 6;
+	u64 reserved_270_271 : 2;
+	u64 avg_level : 8;
+	u64 avg_con : 9;
+	u64 fc_ena : 1;
+	u64 fc_stype : 2;
+	u64 fc_hyst_bits : 4;
+	u64 fc_up_crossing : 1;
+	u64 reserved_297_299 : 3;
+	u64 update_time : 16;
+	u64 reserved_316_319 : 4;
+	u64 fc_addr; /* W5 */
+	u64 ptr_start; /* W6 */
+	u64 ptr_end; /* W7 */
+	u64 bpid_0 : 12;
+	u64 reserved_524_535 : 12;
+	u64 err_int : 8;
+	u64 err_int_ena : 8;
+	u64 thresh_int : 1;
+	u64 thresh_int_ena : 1;
+	u64 thresh_up : 1;
+	u64 reserved_555 : 1;
+	u64 thresh_qint_idx : 7;
+	u64 reserved_563 : 1;
+	u64 err_qint_idx : 7;
+	u64 reserved_571_575 : 5;
+	u64 thresh : 36;
+	u64 rsvd_612_615 : 4;
+	u64 fc_msh_dst : 11;
+	u64 reserved_627_630 : 4;
+	u64 op_dpc_ena : 1;
+	u64 op_dpc_set : 5;
+	u64 reserved_637_637 : 1;
+	u64 stream_ctx : 1;
+	u64 reserved_639 : 1;
+	u64 reserved_640_703; /* W10 */
+	u64 reserved_704_767; /* W11 */
+	u64 reserved_768_831; /* W12 */
+	u64 reserved_832_895; /* W13 */
+	u64 reserved_896_959; /* W14 */
+	u64 reserved_960_1023; /* W15 */
+};
+
+static_assert(sizeof(struct npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE);
+
 #endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 933073cd2280..a3e273126e4e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -203,6 +203,8 @@ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
 M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
 M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
 M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \
+		    npa_cn20k_aq_enq_rsp) \
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
 /* TIM mbox IDs (range 0x800 - 0x9FF) */ \
 /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
@@ -336,6 +338,8 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
 			nix_mcast_grp_update_req, \
 			nix_mcast_grp_update_rsp) \
M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ +M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \ + nix_cn20k_aq_enq_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -832,6 +836,39 @@ struct npa_aq_enq_rsp { }; }; +struct npa_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 aura_id; + u8 ctype; + u8 op; + union { + /* Valid when op == WRITE/INIT and ctype == AURA. + * LF fills the pool_id in aura.pool_addr. AF will translate + * the pool_id to pool context pointer. + */ + struct npa_cn20k_aura_s aura; + /* Valid when op == WRITE/INIT and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; + /* Mask data when op == WRITE (1=write, 0=don't write) */ + union { + /* Valid when op == WRITE and ctype == AURA */ + struct npa_cn20k_aura_s aura_mask; + /* Valid when op == WRITE and ctype == POOL */ + struct npa_cn20k_pool_s pool_mask; + }; +}; + +struct npa_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + /* Valid when op == READ and ctype == AURA */ + struct npa_cn20k_aura_s aura; + /* Valid when op == READ and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; +}; + /* Disable all contexts of type 'ctype' */ struct hwctx_disable_req { struct mbox_msghdr hdr; @@ -940,6 +977,42 @@ struct nix_lf_free_req { u64 flags; }; +/* CN20K NIX AQ enqueue msg */ +struct nix_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; + union { + struct nix_cn20k_rq_ctx_s rq_mask; + struct nix_cn20k_sq_ctx_s sq_mask; + struct nix_cn20k_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + struct nix_bandprof_s prof_mask; + }; +}; + +struct nix_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; +}; + /* CN10K NIX AQ enqueue msg */ struct nix_cn10k_aq_enq_req { struct mbox_msghdr hdr; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b58283341923..e85dac2c806d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -498,6 +498,14 @@ struct channel_fwdata { u8 reserved[RVU_CHANL_INFO_RESERVED]; }; +struct altaf_intr_notify { + unsigned long flr_pf_bmap[2]; + unsigned long flr_vf_bmap[2]; + unsigned long gint_paddr; + unsigned long gint_iova_addr; + unsigned long reserved[6]; +}; + struct rvu_fwdata { #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ #define RVU_FWDATA_VERSION 0x0001 @@ -517,7 +525,8 @@ struct rvu_fwdata { u32 ptp_ext_clk_rate; u32 ptp_ext_tstamp; struct channel_fwdata channel_data; -#define FWDATA_RESERVED_MEM 958 + struct altaf_intr_notify altaf_intr_info; +#define FWDATA_RESERVED_MEM 946 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 9 #define CGX_LMACS_MAX 4 @@ -648,6 +657,7 @@ struct rvu { struct mutex mbox_lock; /* Serialize mbox up and down msgs */ u16 rep_pcifunc; + bool altaf_ready; int rep_cnt; u16 *rep2pfvf_map; u8 rep_mode; @@ -1032,6 +1042,9 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf 
*pfvf, int blkaddr, int nixlf); void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr); +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp); + /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 8375f18c8e07..7370812ece2a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -21,6 +21,8 @@ #include "rvu_npc_hash.h" #include "mcs.h" +#include "cn20k/debugfs.h" + #define DEBUGFS_DIR_NAME "octeontx2" enum { @@ -1101,6 +1103,11 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_aura_s *aura = &rsp->aura; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", @@ -1149,6 +1156,11 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_pool_s *pool = &rsp->pool; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", @@ -2009,10 +2021,16 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx); + return; + } + if (!is_rvu_otx2(rvu)) { print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); return; } + seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", sq_ctx->sqe_way_mask, sq_ctx->cq); seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", @@ -2103,7 +2121,9 @@ static void print_nix_cn10k_rq_ctx(struct seq_file *m, seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); - seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); + seq_printf(m, "W2: band_prof_id \t\t%d\n", + (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id); + seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", @@ -2225,6 +2245,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); @@ -2254,6 +2279,7 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", cq_ctx->qsize, cq_ctx->caching); + seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", cq_ctx->substream, cq_ctx->ena); if (!is_rvu_otx2(rvu)) { @@ -2615,7 +2641,10 @@ static void print_band_prof_ctx(struct seq_file *m, (prof->rc_action == 1) ? 
"DROP" : "RED"; seq_printf(m, "W1: rc_action\t\t%s\n", str); seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); - seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); + + seq_printf(m, "W1: band_prof_id\t%d\n", + (u16)prof->band_prof_id_h << 7 | prof->band_prof_id); + seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); @@ -2784,6 +2813,9 @@ static void rvu_dbg_npa_init(struct rvu *rvu) &rvu_dbg_npa_aura_ctx_fops); debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_pool_ctx_fops); + + if (is_cn20k(rvu->pdev)) /* NDC not appliable for cn20k */ + return; debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_ndc_cache_fops); debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, @@ -3950,6 +3982,9 @@ static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) static const char *rvu_get_dbg_dir_name(struct rvu *rvu) { + if (is_cn20k(rvu->pdev)) + return "cn20k"; + if (!is_rvu_otx2(rvu)) return "cn10k"; else diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 828316211b24..2f485a930edd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1019,6 +1019,12 @@ static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, { struct nix_cn10k_aq_enq_req *aq_req; + if (is_cn20k(rvu->pdev)) { + *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq; + *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq; + return; + } + if (!is_rvu_otx2(rvu)) { aq_req = (struct nix_cn10k_aq_enq_req *)req; *smq = aq_req->sq.smq; @@ -1149,36 +1155,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, case NIX_AQ_INSTOP_WRITE: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(mask, &req->rq_mask, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(mask, &req->sq_mask, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(mask, &req->cq_mask, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(mask, &req->rss_mask, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(mask, &req->prof_mask, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) - memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) - memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) - memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) - memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) - memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) - memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); + memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE); break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: @@ -1243,22 +1249,22 @@ static int 
rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, if (req->op == NIX_AQ_INSTOP_READ) { if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(&rsp->rq, ctx, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(&rsp->sq, ctx, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(&rsp->cq, ctx, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(&rsp->rss, ctx, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(&rsp->prof, ctx, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); } } @@ -1289,8 +1295,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, /* Make copy of original context & mask which are required * for resubmission */ - memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); - memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE); + memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE); /* exclude fields which HW can update */ aq_req.cq_mask.cq_err = 0; @@ -1309,7 +1315,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, * updated fields are masked out for request and response * comparison */ - for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64); word++) { *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); @@ -1317,14 +1323,14 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); } - if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE)) return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; return 0; } -static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp) +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; int err, retries = 5; @@ -5812,6 +5818,8 @@ static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) } } +#define NIX_BW_PROF_HI_MASK GENMASK(10, 7) + static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc) { @@ -5850,7 +5858,8 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, return -EINVAL; ipolicer = &nix_hw->ipolicer[hi_layer]; - prof_idx = req->prof.band_prof_id; + prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h); + prof_idx |= req->prof.band_prof_id; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) return -EINVAL; @@ -6015,8 +6024,10 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, aq_req->op = NIX_AQ_INSTOP_WRITE; aq_req->qidx = leaf_prof; - aq_req->prof.band_prof_id = mid_prof; + aq_req->prof.band_prof_id = mid_prof & 0x7F; aq_req->prof_mask.band_prof_id = GENMASK(6, 0); + aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); + aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); aq_req->prof.hl_en = 1; aq_req->prof_mask.hl_en = 1; @@ -6025,6 +6036,8 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, (struct nix_aq_enq_rsp *)aq_rsp); } +#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) + int rvu_nix_setup_ratelimit_aggr(struct rvu 
*rvu, u16 pcifunc, u16 rq_idx, u16 match_id) { @@ -6056,7 +6069,8 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, return 0; /* Get the bandwidth profile ID mapped to this RQ */ - leaf_prof = aq_rsp.rq.band_prof_id; + leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); + leaf_prof |= aq_rsp.rq.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; ipolicer->match_id[leaf_prof] = match_id; @@ -6094,7 +6108,10 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, * to different RQs and marked with the same match_id * are rate limited in an aggregate fashion */ - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, + aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_prof, mid_prof); @@ -6216,7 +6233,8 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, if (!aq_rsp.prof.hl_en) return; - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; ipolicer->ref_count[mid_prof]--; /* If ref_count is zero, free mid layer profile */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 4f5ca5ab13a4..e2a33e46b48a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -464,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, return 0; } +static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return; + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); + cfg &= ~0x03DULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of stack pages */ + cfg |= 0x10ULL; +#endif + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); +} + static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) { u64 cfg; @@ -479,14 +496,7 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); #endif - /* Do not bypass NDC cache */ - cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); - cfg &= ~0x03DULL; -#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING - /* Disable caching of stack pages */ - cfg |= 0x10ULL; -#endif - rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + npa_aq_ndc_config(rvu, block); /* For CN10K NPA BATCH DMA set 35 cache lines */ if (!is_rvu_otx2(rvu)) { @@ -567,6 +577,9 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr) int bank, max_bank, line, max_line, err; u64 reg, ndc_af_const; + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return 0; + /* Set the ENABLE bit(63) to '0' */ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL); rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 0596a3ac4c12..8e868f815de1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -13,6 +13,8 @@ #define RVU_MULTI_BLK_VER 0x7ULL +#define NIX_MAX_CTX_SIZE 128 + /* RVU Block Address Enumeration */ enum rvu_block_addr_e { BLKADDR_RVUM = 0x0ULL, @@ -370,8 +372,12 @@
struct nix_cq_ctx_s { u64 qsize : 4; u64 cq_err_int : 8; u64 cq_err_int_ena : 8; + /* Ensure all context sizes are 128 bytes */ + u64 padding[12]; }; +static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Receive queue context structure */ struct nix_cn10k_rq_ctx_s { u64 ena : 1; @@ -413,7 +419,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_171 : 1; u64 later_skip : 6; u64 xqe_imm_size : 6; - u64 rsvd_189_184 : 6; + u64 band_prof_id_h : 4; + u64 rsvd_189_188 : 2; u64 xqe_imm_copy : 1; u64 xqe_hdr_split : 1; u64 xqe_drop : 8; /* W3 */ @@ -460,6 +467,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Send queue context structure */ struct nix_cn10k_sq_ctx_s { u64 ena : 1; @@ -523,6 +532,8 @@ struct nix_cn10k_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive queue context structure */ struct nix_rq_ctx_s { u64 ena : 1; @@ -594,6 +605,8 @@ struct nix_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX sqe sizes */ enum nix_maxsqesz { NIX_MAXSQESZ_W16 = 0x0, @@ -668,13 +681,18 @@ struct nix_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive side scaling entry structure*/ struct nix_rsse_s { uint32_t rq : 20; uint32_t reserved_20_31 : 12; - + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE); + /* NIX receive multicast/mirror entry structure */ struct nix_rx_mce_s { uint64_t op : 2; @@ -684,8 +702,12 @@ struct nix_rx_mce_s { uint64_t rsvd_31_24 : 8; uint64_t pf_func : 16; uint64_t next : 16; + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE); + enum nix_band_prof_layers { BAND_PROF_LEAF_LAYER = 0, BAND_PROF_INVAL_LAYER = 1, @@ -736,7 +758,8 @@ struct nix_bandprof_s { uint64_t rc_action : 2; uint64_t meter_algo : 2; uint64_t band_prof_id : 7; - uint64_t reserved_111_118 : 8; + uint64_t band_prof_id_h : 4; + uint64_t reserved_115_118 : 4; uint64_t hl_en : 1; uint64_t reserved_120_127 : 8; uint64_t ts : 48; /* W2 */ @@ -769,6 +792,8 @@ struct nix_bandprof_s { uint64_t reserved_1008_1023 : 16; }; +static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE); + enum nix_lsoalg { NIX_LSOALG_NOP, NIX_LSOALG_ADD_SEGNUM, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index bec7d5b4d7cc..3e1bf22cba69 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -15,6 +15,8 @@ static struct dev_hw_ops otx2_hw_ops = { .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -23,6 +25,8 @@ static struct dev_hw_ops cn10k_hw_ops = { .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; void otx2_init_hw_ops(struct otx2_nic *pfvf) @@ -337,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, 
aq->rq.band_prof_id = policer; aq->rq_mask.band_prof_id = GENMASK(9, 0); + /* If the policer id is greater than 1023, the hardware supports + * more leaf profiles. In that case use band_prof_id_h for the 4 MSBs. + */ + aq->rq.band_prof_id_h = policer >> 10; + aq->rq_mask.band_prof_id_h = GENMASK(3, 0); + /* Fill AQ info */ aq->qidx = rq_idx; aq->ctype = NIX_AQ_CTYPE_RQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c index ec8cde98076d..a60f8cf53feb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -10,17 +10,6 @@ #include "otx2_struct.h" #include "cn10k.h" -static struct dev_hw_ops cn20k_hw_ops = { - .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, - .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, - .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, -}; - -void cn20k_init(struct otx2_nic *pfvf) -{ - pfvf->hw_ops = &cn20k_hw_ops; -} -EXPORT_SYMBOL(cn20k_init); /* CN20K mbox AF => PFx irq handler */ irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) { @@ -250,3 +239,212 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) return 0; } + +#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ + +static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id) +{ +#ifdef CONFIG_DCB + return pfvf->queue_to_pfc_map[aura_id]; +#else + return 0; +#endif +} + +static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + u8 bpid_idx; + int err; + + pool = &pfvf->qset.pool[pool_id]; + + /* Allocate memory for HW to update Aura count. + * Alloc one cache line, so that it fits all FC_STYPE modes. + */ + if (!pool->fc_addr) { + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); + if (err) + return err; + } + + /* Initialize this aura's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + } + + aq->aura_id = aura_id; + + /* Will be filled by AF with correct pool context address */ + aq->aura.pool_addr = pool_id; + aq->aura.pool_caching = 1; + aq->aura.shift = ilog2(numptrs) - 8; + aq->aura.count = numptrs; + aq->aura.limit = numptrs; + aq->aura.avg_level = 255; + aq->aura.ena = 1; + aq->aura.fc_ena = 1; + aq->aura.fc_addr = pool->fc_addr->iova; + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ + + /* Enable backpressure for RQ aura */ + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { + aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * However, the HRM states that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E."
+ */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; + + bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id); + aq->aura.bpid = pfvf->bpid[bpid_idx]; + + /* Set backpressure level for RQ's Aura */ + aq->aura.bp = RQ_BP_LVL_AURA; + } + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_AURA; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type) +{ + struct page_pool_params pp_params = { 0 }; + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + int err, sz; + + pool = &pfvf->qset.pool[pool_id]; + /* Alloc memory for stack which is used to store buffer pointers */ + err = qmem_alloc(pfvf->dev, &pool->stack, + stack_pages, pfvf->hw.stack_pg_bytes); + if (err) + return err; + + pool->rbsize = buf_size; + + /* Initialize this pool's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + qmem_free(pfvf->dev, pool->stack); + return err; + } + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + qmem_free(pfvf->dev, pool->stack); + return -ENOMEM; + } + } + + aq->aura_id = pool_id; + aq->pool.stack_base = pool->stack->iova; + aq->pool.stack_caching = 1; + aq->pool.ena = 1; + aq->pool.buf_size = buf_size / 128; + aq->pool.stack_max_pages = stack_pages; + aq->pool.shift = ilog2(numptrs) - 8; + aq->pool.ptr_start = 0; + aq->pool.ptr_end = ~0ULL; + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_POOL; + aq->op = NPA_AQ_INSTOP_INIT; + + if (type != AURA_NIX_RQ) { + pool->page_pool = NULL; + return 0; + } + + sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE); + pp_params.order = get_order(sz); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } + + return 0; +} + +static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) +{ + struct nix_cn20k_aq_enq_req *aq; + struct otx2_nic *pfvf = dev; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->sq.cq = pfvf->hw.rx_queues + qidx; + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 bytes */ + aq->sq.cq_ena = 1; + aq->sq.ena = 1; + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ + aq->sq.sqb_aura = sqb_aura; + aq->sq.sq_int_ena = NIX_SQINT_BITS; + aq->sq.qint_idx = 0; + /* Due to pipelining, a minimum of 2000 unused SQ CQEs + * must be maintained to avoid CQ overflow.
+ */ + aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, + .sq_aq_init = cn20k_sq_aq_init, + .sqe_flush = cn10k_sqe_flush, + .aura_freeptr = cn10k_aura_freeptr, + .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .aura_aq_init = cn20k_aura_aq_init, + .pool_aq_init = cn20k_pool_aq_init, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 902d6abaa3ec..75ebb17419c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1369,6 +1369,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs) { + return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id, + numptrs); +} + +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1446,6 +1453,13 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { + return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs, + buf_size, type); +} + +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, int type) +{ struct page_pool_params pp_params = { 0 }; struct xsk_buff_pool *xsk_pool; struct npa_aq_enq_req *aq; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1c8a3c078a64..e616a727a3a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -14,6 +14,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> +#include <linux/soc/marvell/silicons.h> #include <linux/soc/marvell/octeontx2/asm.h> #include <net/macsec.h> #include <net/pkt_cls.h> @@ -375,6 +376,11 @@ struct dev_hw_ops { irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); + int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); + int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type); }; #define CN10K_MCS_SA_PER_SC 4 @@ -527,7 +533,7 @@ struct otx2_nic { u32 nix_lmt_size; struct otx2_ptp *ptp; - struct hwtstamp_config tstamp; + struct kernel_hwtstamp_config tstamp; unsigned long rq_bmap; @@ -1059,6 +1065,10 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); int otx2_set_hw_capabilities(struct otx2_nic *pfvf); +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int 
stack_pages, int numptrs, int buf_size, int type); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1098,8 +1108,11 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); -int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd); -int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr); +int otx2_config_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int otx2_config_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e808995703cf..a7feb4c392b3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2445,18 +2445,26 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) return 0; } -int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +int otx2_config_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + *config = pfvf->tstamp; + return 0; +} +EXPORT_SYMBOL(otx2_config_hwtstamp_get); + +int otx2_config_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); - struct hwtstamp_config config; if (!pfvf->ptp) return -ENODEV; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; @@ -2465,8 +2473,11 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) otx2_config_hw_tx_tstamp(pfvf, false); break; case HWTSTAMP_TX_ONESTEP_SYNC: - if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) + if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) { + NL_SET_ERR_MSG_MOD(extack, + "One-step time stamping is not supported"); return -ERANGE; + } pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; schedule_delayed_work(&pfvf->ptp->synctstamp_work, msecs_to_jiffies(500)); @@ -2478,7 +2489,7 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: otx2_config_hw_rx_tstamp(pfvf, false); break; @@ -2497,35 +2508,17 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: otx2_config_hw_rx_tstamp(pfvf, true); - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - memcpy(&pfvf->tstamp, &config, sizeof(config)); + pfvf->tstamp = *config; - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? 
-EFAULT : 0; -} -EXPORT_SYMBOL(otx2_config_hwtstamp); - -int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct otx2_nic *pfvf = netdev_priv(netdev); - struct hwtstamp_config *cfg = &pfvf->tstamp; - - switch (cmd) { - case SIOCSHWTSTAMP: - return otx2_config_hwtstamp(netdev, req); - case SIOCGHWTSTAMP: - return copy_to_user(req->ifr_data, cfg, - sizeof(*cfg)) ? -EFAULT : 0; - default: - return -EOPNOTSUPP; - } + return 0; } -EXPORT_SYMBOL(otx2_ioctl); +EXPORT_SYMBOL(otx2_config_hwtstamp_set); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { @@ -2942,7 +2935,6 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_features = otx2_set_features, .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, - .ndo_eth_ioctl = otx2_ioctl, .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, @@ -2951,6 +2943,8 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, + .ndo_hwtstamp_get = otx2_config_hwtstamp_get, + .ndo_hwtstamp_set = otx2_config_hwtstamp_set, }; int otx2_wq_init(struct otx2_nic *pf) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 25381f079b97..f4fdbfba8667 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -534,8 +534,9 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, - .ndo_eth_ioctl = otx2_ioctl, .ndo_setup_tc = otx2_setup_tc, + .ndo_hwtstamp_get = otx2_config_hwtstamp_get, + .ndo_hwtstamp_set = otx2_config_hwtstamp_set, }; static int otx2_vf_wq_init(struct otx2_nic *vf) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 308b4458e0d4..81bf8908b897 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2420,21 +2420,22 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +static int mlx4_en_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; /* device doesn't support time stamping */ - if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) { + NL_SET_ERR_MSG_MOD(extack, + "device doesn't support time stamping"); return -EINVAL; + } /* TX HW timestamp */ - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -2443,7 +2444,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) } /* RX HW timestamp */ - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -2461,39 +2462,27 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = 
HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } if (mlx4_en_reset_config(dev, config, dev->features)) { - config.tx_type = HWTSTAMP_TX_OFF; - config.rx_filter = HWTSTAMP_FILTER_NONE; + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_NONE; } - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; + return 0; } -static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +static int mlx4_en_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct mlx4_en_priv *priv = netdev_priv(dev); - return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, - sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; -} - -static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx4_en_hwtstamp_set(dev, ifr); - case SIOCGHWTSTAMP: - return mlx4_en_hwtstamp_get(dev, ifr); - default: - return -EOPNOTSUPP; - } + *config = priv->hwtstamp_config; + return 0; } static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, @@ -2560,7 +2549,7 @@ static int mlx4_en_set_features(struct net_device *netdev, } if (reset) { - ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + ret = mlx4_en_reset_config(netdev, &priv->hwtstamp_config, features); if (ret) return ret; @@ -2844,7 +2833,6 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, - .ndo_eth_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, @@ -2858,6 +2846,8 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, .ndo_bpf = mlx4_xdp, + .ndo_hwtstamp_get = mlx4_en_hwtstamp_get, + .ndo_hwtstamp_set = mlx4_en_hwtstamp_set, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -3512,7 +3502,7 @@ out: } int mlx4_en_reset_config(struct net_device *dev, - struct hwtstamp_config ts_config, + struct kernel_hwtstamp_config *ts_config, netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -3522,8 +3512,8 @@ int mlx4_en_reset_config(struct net_device *dev, int port_up = 0; int err = 0; - if (priv->hwtstamp_config.tx_type == ts_config.tx_type && - priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + if (priv->hwtstamp_config.tx_type == ts_config->tx_type && + priv->hwtstamp_config.rx_filter == ts_config->rx_filter && !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) return 0; /* Nothing to change */ @@ -3542,7 +3532,7 @@ int mlx4_en_reset_config(struct net_device *dev, mutex_lock(&mdev->state_lock); memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); - memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); + memcpy(&new_prof.hwtstamp_config, ts_config, sizeof(*ts_config)); err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) @@ -3560,7 +3550,7 @@ int mlx4_en_reset_config(struct net_device *dev, dev->features |= NETIF_F_HW_VLAN_CTAG_RX; else dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + } else if (ts_config->rx_filter == HWTSTAMP_FILTER_NONE) { /* RX time-stamping is OFF, update the RX vlan offload * to the latest wanted 
state */ @@ -3581,7 +3571,7 @@ int mlx4_en_reset_config(struct net_device *dev, * Regardless of the caller's choice, * Turn Off RX vlan offload in case of time-stamping is ON */ - if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (ts_config->rx_filter != HWTSTAMP_FILTER_NONE) { if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ad0d91a75184..aab97694f86b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -388,7 +388,7 @@ struct mlx4_en_port_profile { u8 num_up; int rss_rings; int inline_thold; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; }; struct mlx4_en_profile { @@ -612,7 +612,7 @@ struct mlx4_en_priv { bool wol; struct device *ddev; struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB @@ -780,7 +780,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); int mlx4_en_moderation_update(struct mlx4_en_priv *priv); int mlx4_en_reset_config(struct net_device *dev, - struct hwtstamp_config ts_config, + struct kernel_hwtstamp_config *ts_config, netdev_features_t new_features); void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, struct mlx4_en_stats_bitmap *stats_bitmap, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 891bbbbfbbf1..64c04f52990f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -564,10 +564,14 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) { - u64 fsystem_guid, psystem_guid; + u8 fsystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 psystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 flen; + u8 plen; - fsystem_guid = mlx5_query_nic_system_image_guid(dev); - psystem_guid = mlx5_query_nic_system_image_guid(peer_dev); + mlx5_query_nic_sw_system_image_guid(dev, fsystem_guid, &flen); + mlx5_query_nic_sw_system_image_guid(peer_dev, psystem_guid, &plen); - return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid); + return plen && flen && flen == plen && + !memcmp(fsystem_guid, psystem_guid, flen); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a163f81f07c1..3ada7c16adfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -699,7 +699,7 @@ struct mlx5e_rq { struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; struct mlx5e_cq_decomp cqd; - struct hwtstamp_config *tstamp; + struct kernel_hwtstamp_config *hwtstamp_config; struct mlx5_clock *clock; struct mlx5e_icosq *icosq; struct mlx5e_priv *priv; @@ -787,7 +787,6 @@ struct mlx5e_channel { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); int ix; int vec_ix; @@ -921,7 +920,7 @@ struct mlx5e_priv { u8 max_opened_tc; bool tx_ptp_opened; bool rx_ptp_opened; - struct hwtstamp_config tstamp; + struct kernel_hwtstamp_config hwtstamp_config; u16 q_counter[MLX5_SD_MAX_GROUP_SZ]; u16 drop_rq_q_counter; struct notifier_block 
events_nb; @@ -1030,8 +1029,11 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); +int mlx5e_hwtstamp_set(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int mlx5e_hwtstamp_get(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config); int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -1157,7 +1159,9 @@ extern const struct ethtool_ops mlx5e_ethtool_ops; int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); -int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, +int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb); +int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb, bool enable_mc_lb); void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index 0b1ac6e5c890..8818f65d1fbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -40,11 +40,8 @@ void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev) static void mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { - u64 parent_id; - - parent_id = mlx5_query_nic_system_image_guid(dev); - ppid->id_len = sizeof(parent_id); - memcpy(ppid->id, &parent_id, sizeof(parent_id)); + BUILD_BUG_ON(MLX5_SW_IMAGE_GUID_MAX_BYTES > MAX_PHYS_ITEM_ID_LEN); + mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len); } int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c index 4e72ca8070e2..1de18c7e96ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c @@ -6,6 +6,7 @@ #include <linux/xarray.h> #include <linux/hashtable.h> #include <linux/refcount.h> +#include <linux/mlx5/driver.h> #include "mapping.h" @@ -24,7 +25,8 @@ struct mapping_ctx { struct delayed_work dwork; struct list_head pending_list; spinlock_t pending_list_lock; /* Guards pending list */ - u64 id; + u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 id_len; u8 type; struct list_head list; refcount_t refcount; @@ -220,13 +222,15 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal) } struct mapping_ctx * -mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal) +mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id, + bool delayed_removal) { struct mapping_ctx *ctx; mutex_lock(&shared_ctx_lock); list_for_each_entry(ctx, &shared_ctx_list, list) { - if (ctx->id == id && ctx->type == type) { + if (ctx->type == type && ctx->id_len == id_len && + !memcmp(id, ctx->id, id_len)) { if (refcount_inc_not_zero(&ctx->refcount)) goto unlock; break; @@ -237,7 +241,8 @@ mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delaye if 
(IS_ERR(ctx)) goto unlock; - ctx->id = id; + memcpy(ctx->id, id, id_len); + ctx->id_len = id_len; ctx->type = type; list_add(&ctx->list, &shared_ctx_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h index 4e2119f0f4c1..e86a103d58b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h @@ -27,6 +27,7 @@ void mapping_destroy(struct mapping_ctx *ctx); /* adds mapping with an id or get an existing mapping with the same id */ struct mapping_ctx * -mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal); +mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id, + bool delayed_removal); #endif /* __MLX5_MAPPING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index c93ee969ea64..12e10feb30f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -713,7 +713,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, rq->netdev = priv->netdev; rq->priv = priv; rq->clock = mdev->clock; - rq->tstamp = &priv->tstamp; + rq->hwtstamp_config = &priv->hwtstamp_config; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->ptp_stats.rq; @@ -896,7 +896,6 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->priv = priv; c->mdev = priv->mdev; - c->tstamp = &priv->tstamp; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 1b3c9648220b..1c0e0a86a9ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -64,7 +64,6 @@ struct mlx5e_ptp { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES); struct mlx5_sq_bfreg *bfreg; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 9d1c677814e0..87a2ad69526d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -30,15 +30,11 @@ static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswi { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev, *esw_mdev; - u64 system_guid, esw_system_guid; mdev = priv->mdev; esw_mdev = esw->dev; - system_guid = mlx5_query_nic_system_image_guid(mdev); - esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev); - - return system_guid == esw_system_guid; + return mlx5_same_hw_devs(mdev, esw_mdev); } static struct net_device * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index b1415992ffa2..0686fbdd5a05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -318,7 +318,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx struct devlink_fmsg *fmsg) { mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - devlink_fmsg_u32_pair_put(fmsg, "filter_type", 
priv->tstamp.rx_filter); + devlink_fmsg_u32_pair_put(fmsg, "filter_type", + priv->hwtstamp_config.rx_filter); mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); mlx5e_health_fmsg_named_obj_nest_end(fmsg); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index c96cbc4b0dbf..88b0e1050d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -231,6 +231,8 @@ mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, rqtn, rss_inner); mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); rss_tt = mlx5e_rss_get_tt_config(rss, tt); + mlx5e_tir_builder_build_self_lb_block(builder, rss->params.self_lb_blk, + rss->params.self_lb_blk); mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); err = mlx5e_tir_init(tir, builder, rss->mdev, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index 5fb03cd0a411..17664757a561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -23,6 +23,7 @@ struct mlx5e_rss_init_params { struct mlx5e_rss_params { bool inner_ft_support; u32 drop_rqn; + bool self_lb_blk; }; struct mlx5e_rss_params_traffic_type diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index ac26a32845d0..55c117b7d8c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -71,6 +71,8 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, rss_params = (struct mlx5e_rss_params) { .inner_ft_support = inner_ft_support, .drop_rqn = res->drop_rqn, + .self_lb_blk = + res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK, }; rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params); @@ -104,6 +106,8 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int in rss_params = (struct mlx5e_rss_params) { .inner_ft_support = inner_ft_support, .drop_rqn = res->drop_rqn, + .self_lb_blk = + res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK, }; rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params); @@ -346,6 +350,7 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + bool self_lb_blk = res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK; struct mlx5e_tir_builder *builder; int err = 0; int ix; @@ -376,6 +381,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), inner_ft_support); mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); + mlx5e_tir_builder_build_self_lb_block(builder, self_lb_blk, + self_lb_blk); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 65a857c215e1..675780120a20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -21,6 +21,7 @@ enum mlx5e_rx_res_features { MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), MLX5E_RX_RES_FEATURE_PTP = BIT(1), MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2), + MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK = 
BIT(3), }; /* Setup */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c index 896f718483c3..991f47050643 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c @@ -307,7 +307,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_int_port_priv *int_port_priv; - u64 mapping_id; + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 id_len; if (!mlx5e_tc_int_port_supported(esw)) return NULL; @@ -316,9 +317,10 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv) if (!int_port_priv) return NULL; - mapping_id = mlx5_query_nic_system_image_guid(priv->mdev); + mlx5_query_nic_sw_system_image_guid(priv->mdev, mapping_id, &id_len); - int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT, + int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_INT_PORT, sizeof(u32) * 2, (1 << ESW_VPORT_BITS) - 1, true); if (IS_ERR(int_port_priv->metadata_mapping)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 870d12364f99..fc0e57403d25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2287,9 +2287,10 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, enum mlx5_flow_namespace_type ns_type, struct mlx5e_post_act *post_act) { + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_tc_ct_priv *ct_priv; struct mlx5_core_dev *dev; - u64 mapping_id; + u8 id_len; int err; dev = priv->mdev; @@ -2301,16 +2302,18 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!ct_priv) goto err_alloc; - mapping_id = mlx5_query_nic_system_image_guid(dev); + mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len); - ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE, + ct_priv->zone_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_ZONE, sizeof(u16), 0, true); if (IS_ERR(ct_priv->zone_mapping)) { err = PTR_ERR(ct_priv->zone_mapping); goto err_mapping_zone; } - ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS, + ct_priv->labels_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_LABELS, sizeof(u32) * 4, 0, true); if (IS_ERR(ct_priv->labels_mapping)) { err = PTR_ERR(ct_priv->labels_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index 19499072f67f..0b55e77f19c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -146,6 +146,31 @@ void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder) MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } +static void mlx5e_tir_context_self_lb_block(void *tirc, bool enable_uc_lb, + bool enable_mc_lb) +{ + u8 lb_flags = 0; + + if (enable_uc_lb) + lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; + if (enable_mc_lb) + lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; + + MLX5_SET(tirc, tirc, self_lb_block, lb_flags); +} + +void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder, + bool enable_uc_lb, + bool enable_mc_lb) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, 
bitmask.self_lb_en, 1); + + mlx5e_tir_context_self_lb_block(tirc, enable_uc_lb, enable_mc_lb); +} + void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) { void *tirc = mlx5e_tir_builder_get_tirc(builder); @@ -153,9 +178,7 @@ void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) WARN_ON(builder->modify); MLX5_SET(tirc, tirc, tls_en, 1); - MLX5_SET(tirc, tirc, self_lb_block, - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); + mlx5e_tir_context_self_lb_block(tirc, true, true); } int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h index e8df3aaf6562..958eeb959a19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -35,6 +35,9 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, const struct mlx5e_rss_params_traffic_type *rss_tt, bool inner); void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder, + bool enable_uc_lb, + bool enable_mc_lb); void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder); struct mlx5_core_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 996fcdb5a29d..da8c44f46edb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -47,7 +47,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params rq->netdev = priv->netdev; rq->priv = priv; rq->clock = mdev->clock; - rq->tstamp = &priv->tstamp; + rq->hwtstamp_config = &priv->hwtstamp_config; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &priv->trap_stats.rq; @@ -144,7 +144,6 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) t->priv = priv; t->mdev = priv->mdev; - t->tstamp = &priv->tstamp; t->pdev = mlx5_core_dma_dev(priv->mdev); t->netdev = priv->netdev; t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h index aa3f17658c6d..394e917ea2b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h @@ -22,7 +22,6 @@ struct mlx5e_trap { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); struct mlx5e_params params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 6760bb0336df..7e191e1569e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -92,7 +92,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq); -static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) +static inline bool mlx5e_rx_hw_stamp(struct kernel_hwtstamp_config *config) { return config->rx_filter == HWTSTAMP_FILTER_ALL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 5d51600935a6..80f9fc10877a 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -179,7 +179,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) { const struct mlx5e_xdp_buff *_ctx = (void *)ctx; - if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp))) + if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config))) return -ENODATA; *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index dbd88eb5c082..5981c71cae2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -71,7 +71,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, rq->pdev = c->pdev; rq->netdev = c->netdev; rq->priv = c->priv; - rq->tstamp = c->tstamp; + rq->hwtstamp_config = &c->priv->hwtstamp_config; rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c index 8565cfe8d7dc..38e7c77cc851 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c @@ -28,12 +28,15 @@ struct mlx5e_psp_tx { struct mlx5_flow_handle *rule; struct mutex mutex; /* Protect PSP TX steering */ u32 refcnt; + struct mlx5_fc *tx_counter; }; struct mlx5e_psp_rx_err { struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; - struct mlx5_flow_handle *drop_rule; + struct mlx5_flow_handle *auth_fail_rule; + struct mlx5_flow_handle *err_rule; + struct mlx5_flow_handle *bad_rule; struct mlx5_modify_hdr *copy_modify_hdr; }; @@ -50,6 +53,10 @@ struct mlx5e_accel_fs_psp_prot { struct mlx5e_accel_fs_psp { struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES]; + struct mlx5_fc *rx_counter; + struct mlx5_fc *rx_auth_fail_counter; + struct mlx5_fc *rx_err_counter; + struct mlx5_fc *rx_bad_counter; }; struct mlx5e_psp_fs { @@ -72,9 +79,19 @@ static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i) static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs, struct mlx5e_psp_rx_err *rx_err) { - if (rx_err->drop_rule) { - mlx5_del_flow_rules(rx_err->drop_rule); - rx_err->drop_rule = NULL; + if (rx_err->bad_rule) { + mlx5_del_flow_rules(rx_err->bad_rule); + rx_err->bad_rule = NULL; + } + + if (rx_err->err_rule) { + mlx5_del_flow_rules(rx_err->err_rule); + rx_err->err_rule = NULL; + } + + if (rx_err->auth_fail_rule) { + mlx5_del_flow_rules(rx_err->auth_fail_rule); + rx_err->auth_fail_rule = NULL; } if (rx_err->rule) { @@ -117,6 +134,7 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_core_dev *mdev = fs->mdev; + struct mlx5_flow_destination dest[2]; struct mlx5_flow_act flow_act = {}; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *fte; @@ -147,10 +165,14 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, accel_psp_setup_syndrome_match(spec, PSP_OK); /* create fte */ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.modify_hdr = modify_hdr; - fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, - &fs_prot->default_dest, 1); + dest[0].type = fs_prot->default_dest.type; + dest[0].ft = fs_prot->default_dest.ft; + dest[1].type = 
MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter = fs->rx_fs->rx_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2); if (IS_ERR(fte)) { err = PTR_ERR(fte); mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err); @@ -158,22 +180,69 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, } rx_err->rule = fte; - /* add default drop rule */ + /* add auth fail drop rule */ memset(spec, 0, sizeof(*spec)); memset(&flow_act, 0, sizeof(flow_act)); + accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL); /* create fte */ - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; - fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, NULL, 0); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_auth_fail_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); if (IS_ERR(fte)) { err = PTR_ERR(fte); - mlx5_core_err(mdev, "fail to add psp rx err drop rule err=%d\n", err); + mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n", + err); goto out_drop_rule; } - rx_err->drop_rule = fte; + rx_err->auth_fail_rule = fte; + + /* add framing drop rule */ + memset(spec, 0, sizeof(*spec)); + memset(&flow_act, 0, sizeof(flow_act)); + accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER); + /* create fte */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_err_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n", + err); + goto out_drop_auth_fail_rule; + } + rx_err->err_rule = fte; + + /* add misc. errors drop rule */ + memset(spec, 0, sizeof(*spec)); + memset(&flow_act, 0, sizeof(flow_act)); + /* create fte */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_bad_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + mlx5_core_err(mdev, "fail to add psp rx misc. 
err drop rule err=%d\n", + err); + goto out_drop_error_rule; + } + rx_err->bad_rule = fte; + rx_err->copy_modify_hdr = modify_hdr; goto out_spec; +out_drop_error_rule: + mlx5_del_flow_rules(rx_err->err_rule); + rx_err->err_rule = NULL; +out_drop_auth_fail_rule: + mlx5_del_flow_rules(rx_err->auth_fail_rule); + rx_err->auth_fail_rule = NULL; out_drop_rule: mlx5_del_flow_rules(rx_err->rule); rx_err->rule = NULL; @@ -461,6 +530,10 @@ static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs) return; accel_psp = fs->rx_fs; + mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter); for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) { fs_prot = &accel_psp->fs_prot[i]; mutex_destroy(&fs_prot->prot_mutex); @@ -474,7 +547,10 @@ static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs) { struct mlx5e_accel_fs_psp_prot *fs_prot; struct mlx5e_accel_fs_psp *accel_psp; + struct mlx5_core_dev *mdev = fs->mdev; + struct mlx5_fc *flow_counter; enum accel_fs_psp_type i; + int err; accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL); if (!accel_psp) @@ -485,9 +561,68 @@ static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs) mutex_init(&fs_prot->prot_mutex); } + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_err; + } + accel_psp->rx_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx auth fail flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_counter_err; + } + accel_psp->rx_auth_fail_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx error flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_auth_fail_counter_err; + } + accel_psp->rx_err_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx bad flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_err_counter_err; + } + accel_psp->rx_bad_counter = flow_counter; + fs->rx_fs = accel_psp; return 0; + +out_err_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_err_counter); + accel_psp->rx_err_counter = NULL; +out_auth_fail_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter); + accel_psp->rx_auth_fail_counter = NULL; +out_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_counter); + accel_psp->rx_counter = NULL; +out_err: + for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) { + fs_prot = &accel_psp->fs_prot[i]; + mutex_destroy(&fs_prot->prot_mutex); + } + kfree(accel_psp); + fs->rx_fs = NULL; + + return err; } void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) @@ -532,6 +667,7 @@ static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; struct mlx5_core_dev *mdev = fs->mdev; struct mlx5_flow_act flow_act = {}; u32 *in, *mc, *outer_headers_c; @@ -580,8 +716,11 @@ static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs) flow_act.crypto.type = 
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP; flow_act.flags |= FLOW_ACT_NO_APPEND; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0); + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = tx_fs->tx_counter; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err); @@ -650,6 +789,7 @@ static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs) if (!tx_fs) return; + mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter); mutex_destroy(&tx_fs->mutex); WARN_ON(tx_fs->refcnt); kfree(tx_fs); @@ -658,10 +798,12 @@ static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs) static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs) { + struct mlx5_core_dev *mdev = fs->mdev; struct mlx5_flow_namespace *ns; + struct mlx5_fc *flow_counter; struct mlx5e_psp_tx *tx_fs; - ns = mlx5_get_flow_namespace(fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); if (!ns) return -EOPNOTSUPP; @@ -669,12 +811,55 @@ static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs) if (!tx_fs) return -ENOMEM; + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp tx flow counter err=%pe\n", + flow_counter); + kfree(tx_fs); + return PTR_ERR(flow_counter); + } + tx_fs->tx_counter = flow_counter; mutex_init(&tx_fs->mutex); tx_fs->ns = ns; fs->tx_fs = tx_fs; return 0; } +static void +mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv, + struct mlx5e_psp_stats *stats) +{ + struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_accel_fs_psp *accel_psp; + + accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs; + + if (tx_fs->tx_counter) + mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts, + &stats->psp_tx_bytes); + + if (accel_psp->rx_counter) + mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts, + &stats->psp_rx_bytes); + + if (accel_psp->rx_auth_fail_counter) + mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter, + &stats->psp_rx_pkts_auth_fail, + &stats->psp_rx_bytes_auth_fail); + + if (accel_psp->rx_err_counter) + mlx5_fc_query(mdev, accel_psp->rx_err_counter, + &stats->psp_rx_pkts_frame_err, + &stats->psp_rx_bytes_frame_err); + + if (accel_psp->rx_bad_counter) + mlx5_fc_query(mdev, accel_psp->rx_bad_counter, + &stats->psp_rx_pkts_drop, + &stats->psp_rx_bytes_drop); +} + void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { if (!priv->psp) @@ -849,12 +1034,30 @@ mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack) return mlx5e_psp_rotate_key(priv->mdev); } +static void +mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats) +{ + struct mlx5e_priv *priv = netdev_priv(psd->main_netdev); + struct mlx5e_psp_stats nstats; + + mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats); + stats->rx_packets = nstats.psp_rx_pkts; + stats->rx_bytes = nstats.psp_rx_bytes; + stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail; + stats->rx_error = nstats.psp_rx_pkts_frame_err; + stats->rx_bad = nstats.psp_rx_pkts_drop; + stats->tx_packets = nstats.psp_tx_pkts; + stats->tx_bytes = nstats.psp_tx_bytes; + stats->tx_error = atomic_read(&priv->psp->tx_drop); +} + static struct psp_dev_ops 
mlx5_psp_ops = { .set_config = mlx5e_psp_set_config, .rx_spi_alloc = mlx5e_psp_rx_spi_alloc, .tx_key_add = mlx5e_psp_assoc_add, .tx_key_del = mlx5e_psp_assoc_del, .key_rotate = mlx5e_psp_key_rotate, + .get_stats = mlx5e_psp_get_stats, }; void mlx5e_psp_unregister(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h index 42bb671fb2cb..6b62fef0d9a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h @@ -7,11 +7,27 @@ #include <net/psp/types.h> #include "en.h" +struct mlx5e_psp_stats { + u64 psp_rx_pkts; + u64 psp_rx_bytes; + u64 psp_rx_pkts_auth_fail; + u64 psp_rx_bytes_auth_fail; + u64 psp_rx_pkts_frame_err; + u64 psp_rx_bytes_frame_err; + u64 psp_rx_pkts_drop; + u64 psp_rx_bytes_drop; + u64 psp_tx_pkts; + u64 psp_tx_bytes; + u64 psp_tx_pkts_drop; + u64 psp_tx_bytes_drop; +}; + struct mlx5e_psp { struct psp_dev *psp; struct psp_dev_caps caps; struct mlx5e_psp_fs *fs; atomic_t tx_key_cnt; + atomic_t tx_drop; }; static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c index 828bff1137af..c17ea0fcd8ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c @@ -186,6 +186,7 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, /* psp_encap of the packet */ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) { kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT); + atomic_inc(&priv->psp->tx_drop); return false; } if (skb_is_gso(skb)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 30424ccad584..5a2ac7b6f260 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -247,45 +247,43 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) memset(res, 0, sizeof(*res)); } -int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, - bool enable_mc_lb) +int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb) { - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_tir_builder *builder; struct mlx5e_tir *tir; - u8 lb_flags = 0; - int err = 0; - u32 tirn = 0; - int inlen; - void *in; + int err = 0; - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(true); + if (!builder) return -ENOMEM; - if (enable_uc_lb) - lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; - - if (enable_mc_lb) - lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; - - if (lb_flags) - MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags); - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + mlx5e_tir_builder_build_self_lb_block(builder, enable_uc_lb, + enable_mc_lb); mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) { - tirn = tir->tirn; - err = mlx5_core_modify_tir(mdev, tirn, in); - if (err) + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5_core_err(mdev, + "modify tir(0x%x) enable_lb uc(%d) mc(%d) failed, %d\n", + mlx5e_tir_get_tirn(tir), + enable_uc_lb, enable_mc_lb, err); break; + } } mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); - kvfree(in); - if (err) 
- netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); + mlx5e_tir_builder_free(builder); return err; } + +int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb) +{ + if (MLX5_CAP_GEN(mdev, tis_tir_td_order)) + return 0; /* refresh not needed */ + + return mlx5e_modify_tirs_lb(mdev, enable_uc_lb, enable_mc_lb); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 893e1380a7c9..01b8f05a23db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -2271,7 +2271,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; + rx_filter = priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE; err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5e17eae81f4b..e537df670758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -735,7 +735,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param rq->pdev = c->pdev; rq->netdev = c->netdev; rq->priv = c->priv; - rq->tstamp = c->tstamp; + rq->hwtstamp_config = &c->priv->hwtstamp_config; rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; @@ -2816,7 +2816,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->priv = priv; c->mdev = mdev; - c->tstamp = &priv->tstamp; c->ix = ix; c->vec_ix = vec_ix; c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix); @@ -3370,12 +3369,12 @@ static int mlx5e_switch_priv_params(struct mlx5e_priv *priv, } static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *old_chs, struct mlx5e_channels *new_chs, mlx5e_fp_preactivate preactivate, void *context) { struct net_device *netdev = priv->netdev; - struct mlx5e_channels old_chs; int carrier_ok; int err = 0; @@ -3384,7 +3383,6 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, mlx5e_deactivate_priv_channels(priv); - old_chs = priv->channels; priv->channels = *new_chs; /* New channels are ready to roll, call the preactivate hook if needed @@ -3393,12 +3391,13 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, if (preactivate) { err = preactivate(priv, context); if (err) { - priv->channels = old_chs; + priv->channels = *old_chs; goto out; } } - mlx5e_close_channels(&old_chs); + if (!MLX5_CAP_GEN(priv->mdev, tis_tir_td_order)) + mlx5e_close_channels(old_chs); priv->profile->update_rx(priv); mlx5e_selq_apply(&priv->selq); @@ -3417,16 +3416,20 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, mlx5e_fp_preactivate preactivate, void *context, bool reset) { - struct mlx5e_channels *new_chs; + struct mlx5e_channels *old_chs, *new_chs; int err; reset &= test_bit(MLX5E_STATE_OPENED, &priv->state); if (!reset) return mlx5e_switch_priv_params(priv, params, preactivate, context); + old_chs = kzalloc(sizeof(*old_chs), GFP_KERNEL); new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL); - if (!new_chs) - return -ENOMEM; + if (!old_chs || !new_chs) { + err = -ENOMEM; + goto err_free_chs; + } + new_chs->params = *params; mlx5e_selq_prepare_params(&priv->selq, &new_chs->params); @@ -3435,11 +3438,18 @@ int 
mlx5e_safe_switch_params(struct mlx5e_priv *priv, if (err) goto err_cancel_selq; - err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); + *old_chs = priv->channels; + + err = mlx5e_switch_priv_channels(priv, old_chs, new_chs, + preactivate, context); if (err) goto err_close; + if (MLX5_CAP_GEN(priv->mdev, tis_tir_td_order)) + mlx5e_close_channels(old_chs); + kfree(new_chs); + kfree(old_chs); return 0; err_close: @@ -3447,7 +3457,9 @@ err_close: err_cancel_selq: mlx5e_selq_cancel(&priv->selq); +err_free_chs: kfree(new_chs); + kfree(old_chs); return err; } @@ -3458,8 +3470,8 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) void mlx5e_timestamp_init(struct mlx5e_priv *priv) { - priv->tstamp.tx_type = HWTSTAMP_TX_OFF; - priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, @@ -4012,6 +4024,11 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->rx_bytes += rq_stats->bytes; s->multicast += rq_stats->mcast_packets; } + +#ifdef CONFIG_MLX5_EN_PSP + if (priv->psp) + s->tx_dropped += atomic_read(&priv->psp->tx_drop); +#endif } void @@ -4754,22 +4771,23 @@ static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx) &new_params.ptp_rx, true); } -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) +int mlx5e_hwtstamp_set(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; bool rx_cqe_compress_def; bool ptp_rx; int err; if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || - (mlx5_clock_get_ptp_index(priv->mdev) == -1)) + (mlx5_clock_get_ptp_index(priv->mdev) == -1)) { + NL_SET_ERR_MSG_MOD(extack, + "Timestamps are not supported on this device"); return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + } /* TX HW timestamp */ - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -4781,7 +4799,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def; /* RX HW timestamp */ - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: ptp_rx = false; break; @@ -4800,7 +4818,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; /* ptp_rx is set if both HW TS is set and CQE * compression is set */ @@ -4813,47 +4831,50 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) err = mlx5e_hwstamp_config_no_ptp_rx(priv, - config.rx_filter != HWTSTAMP_FILTER_NONE); + config->rx_filter != HWTSTAMP_FILTER_NONE); else err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx); if (err) goto err_unlock; - memcpy(&priv->tstamp, &config, sizeof(config)); + priv->hwtstamp_config = *config; mutex_unlock(&priv->state_lock); /* might need to fix some features */ netdev_update_features(priv->netdev); - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? 
-EFAULT : 0; + return 0; err_unlock: mutex_unlock(&priv->state_lock); return err; } -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) +static int mlx5e_hwtstamp_set_ndo(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config *cfg = &priv->tstamp; + struct mlx5e_priv *priv = netdev_priv(netdev); + return mlx5e_hwtstamp_set(priv, config, extack); +} + +int mlx5e_hwtstamp_get(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config) +{ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) return -EOPNOTSUPP; - return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0; + *config = priv->hwtstamp_config; + + return 0; } -static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int mlx5e_hwtstamp_get_ndo(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct mlx5e_priv *priv = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx5e_hwstamp_set(priv, ifr); - case SIOCGHWTSTAMP: - return mlx5e_hwstamp_get(priv, ifr); - default: - return -EOPNOTSUPP; - } + return mlx5e_hwtstamp_get(priv, config); } #ifdef CONFIG_MLX5_ESWITCH @@ -5294,13 +5315,14 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_nic_mtu, - .ndo_eth_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, .ndo_xdp_xmit = mlx5e_xdp_xmit, .ndo_xsk_wakeup = mlx5e_xsk_wakeup, + .ndo_hwtstamp_get = mlx5e_hwtstamp_get_ndo, + .ndo_hwtstamp_set = mlx5e_hwtstamp_set_ndo, #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -6145,7 +6167,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) static int mlx5e_update_nic_rx(struct mlx5e_priv *priv) { - return mlx5e_refresh_tirs(priv, false, false); + return mlx5e_refresh_tirs(priv->mdev, false, false); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 687cf123211d..1f6930c77437 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1604,7 +1604,7 @@ static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, stats->lro_bytes += cqe_bcnt; } - if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config))) skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); @@ -2656,7 +2656,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct hwtstamp_config *tstamp; struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; @@ -2680,7 +2679,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, } priv = mlx5i_epriv(netdev); - tstamp = &priv->tstamp; stats = &priv->channel_stats[rq->ix]->rq; flags_rqpn = be32_to_cpu(cqe->flags_rqpn); @@ -2716,7 +2714,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, stats->csum_none++; } - if (unlikely(mlx5e_rx_hw_stamp(tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config))) skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 2f7a543feca6..fcad464bc4d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -214,7 +214,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, return err; } - err = mlx5e_refresh_tirs(priv, true, false); + err = mlx5e_modify_tirs_lb(priv->mdev, true, false); if (err) goto out; @@ -243,7 +243,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, mlx5_nic_vport_update_local_lb(priv->mdev, false); dev_remove_pack(&lbtp->pt); - mlx5e_refresh_tirs(priv, false, false); + mlx5e_modify_tirs_lb(priv->mdev, false, false); } static int mlx5e_cond_loopback(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 00c2763e57ca..a8773b2342c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3614,15 +3614,11 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; - u64 fsystem_guid, psystem_guid; fmdev = priv->mdev; pmdev = peer_priv->mdev; - fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); - psystem_guid = mlx5_query_nic_system_image_guid(pmdev); - - return (fsystem_guid == psystem_guid); + return mlx5_same_hw_devs(fmdev, pmdev); } static int @@ -5237,10 +5233,11 @@ static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv) int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_core_dev *dev = priv->mdev; struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; - u64 mapping_id; + u8 id_len; int err; mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); @@ -5256,11 +5253,13 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); - mapping_id = mlx5_query_nic_system_image_guid(dev); + mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len); - chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + chains_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_CHAIN, sizeof(struct mlx5_mapped_obj), - MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + MLX5E_TC_TABLE_CHAIN_TAG_MASK, + true); if (IS_ERR(chains_mapping)) { err = PTR_ERR(chains_mapping); @@ -5391,14 +5390,15 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) { const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_devcom_match_attr attr = {}; struct netdev_phys_item_id ppid; struct mlx5e_rep_priv *rpriv; struct mapping_ctx *mapping; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; - u64 mapping_id; int err = 0; + u8 id_len; rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); priv = netdev_priv(rpriv->netdev); @@ -5416,9 +5416,9 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); - mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len); - mapping = 
mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, + mapping = mapping_create_for_id(mapping_id, id_len, MAPPING_TYPE_TUNNEL, sizeof(struct tunnel_match_key), TUNNEL_INFO_BITS_MASK, true); @@ -5431,8 +5431,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) /* Two last values are reserved for stack devices slow path table mark * and bridge ingress push mark. */ - mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, - sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); + mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_TUNNEL_ENC_OPTS, + sz_enc_opts, ENC_OPTS_BITS_MASK - 2, + true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5453,7 +5455,7 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) err = netif_get_port_parent_id(priv->netdev, &ppid, false); if (!err) { - memcpy(&attr.key.val, &ppid.id, sizeof(attr.key.val)); + memcpy(&attr.key.buf, &ppid.id, ppid.id_len); attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS; attr.net = mlx5_core_net(esw->dev); mlx5_esw_offloads_devcom_init(esw, &attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c index 0091ba697bae..250af09b5af2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c @@ -4,13 +4,8 @@ #include "fs_core.h" #include "eswitch.h" -enum { - MLX5_ADJ_VPORT_DISCONNECT = 0x0, - MLX5_ADJ_VPORT_CONNECT = 0x1, -}; - -static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, - u16 vport, bool connect) +int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport, + bool connect) { u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {}; @@ -24,7 +19,7 @@ static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1); MLX5_SET(modify_vport_state_in, in, ingress_connect, connect); MLX5_SET(modify_vport_state_in, in, egress_connect, connect); - + MLX5_SET(modify_vport_state_in, in, admin_state, connect); return mlx5_cmd_exec_in(dev, modify_vport_state, in); } @@ -96,7 +91,6 @@ static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id, if (err) goto acl_ns_remove; - mlx5_esw_adj_vport_modify(esw->dev, vport_num, MLX5_ADJ_VPORT_CONNECT); return 0; acl_ns_remove: @@ -117,8 +111,7 @@ static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw, esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n", vport_num, vport->vhca_id); - mlx5_esw_adj_vport_modify(esw->dev, vport_num, - MLX5_ADJ_VPORT_DISCONNECT); + mlx5_esw_offloads_rep_remove(esw, vport); mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering, vport->index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index cf88a106d80d..89a58dee50b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -7,11 +7,7 @@ static void mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { - u64 parent_id; - - parent_id = mlx5_query_nic_system_image_guid(dev); - ppid->id_len = sizeof(parent_id); - memcpy(ppid->id, &parent_id, sizeof(parent_id)); + mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len); } static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 16eb99aba2a7..beaec450a734 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -264,6 +264,9 @@ struct mlx5_eswitch_fdb { struct offloads_fdb { struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *drop_root; + struct mlx5_flow_handle *drop_root_rule; + struct mlx5_fc *drop_root_fc; struct mlx5_flow_table *tc_miss_table; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; @@ -392,6 +395,7 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; u32 last_vport_idx; int mode; + bool offloads_inactive; u16 manager_vport; u16 first_host_vport; u8 num_peers; @@ -634,6 +638,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw); void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw); +int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport, + bool connect); #define MLX5_DEBUG_ESWITCH_MASK BIT(3) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 44a142a041b2..0b1a180ef238 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1577,6 +1577,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) attr.max_grp_num = esw->params.large_group_num; attr.default_ft = miss_fdb; attr.mapping = esw->offloads.reg_c0_obj_pool; + attr.fs_base_prio = FDB_BYPASS_PATH; chains = mlx5_chains_create(dev, &attr); if (IS_ERR(chains)) { @@ -2355,6 +2356,131 @@ static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode) mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); } +static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.offloads.drop_root) + return; + + esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n", + esw->fdb_table.offloads.drop_root->id, + esw->fdb_table.offloads.drop_root_fc->id); + mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule); + /* Don't free flow counter here, can be reused on a later activation */ + mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root); + esw->fdb_table.offloads.drop_root_rule = NULL; + esw->fdb_table.offloads.drop_root = NULL; +} + +static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_destination drop_fc_dst = {}; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_table *table; + int err = 0, dst_num = 0; + + if (esw->fdb_table.offloads.drop_root) + return 0; + + root_ns = esw->fdb_table.offloads.ns; + + ft_attr.prio = FDB_DROP_ROOT; + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(table)) { + esw_warn(dev, "Failed to create fdb drop root table, err %pe\n", + table); + return PTR_ERR(table); + } + + /* Drop FC reusable, create once on first deactivation of FDB */ + if (!esw->fdb_table.offloads.drop_root_fc) { + struct mlx5_fc *counter = mlx5_fc_create(dev, 0); + + err = PTR_ERR_OR_ZERO(counter); + if (err) + esw_warn(esw->dev, "create fdb drop fc err %d\n", err); + else + esw->fdb_table.offloads.drop_root_fc = counter; + } + + 
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + if (esw->fdb_table.offloads.drop_root_fc) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc; + dst = &drop_fc_dst; + dst_num++; + } + + flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num); + err = PTR_ERR_OR_ZERO(flow_rule); + if (err) { + esw_warn(esw->dev, + "fs offloads: Failed to add fdb drop root rule err %d\n", + err); + goto err_flow_rule; + } + + esw->fdb_table.offloads.drop_root = table; + esw->fdb_table.offloads.drop_root_rule = flow_rule; + esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n", + table->id, dst ? dst->counter->id : 0); + return 0; + +err_flow_rule: + /* no need to free drop fc, esw_offloads_steering_cleanup will do it */ + mlx5_destroy_flow_table(table); + return err; +} + +static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + + mlx5_esw_fdb_drop_destroy(esw); + mlx5_mpfs_enable(esw->dev); + + mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { + if (!vport->adjacent) + continue; + esw_debug(esw->dev, "Connecting vport %d to eswitch\n", + vport->vport); + mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true); + } + + esw->offloads_inactive = false; + esw_warn(esw->dev, "MPFS/FDB active\n"); +} + +static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + + mlx5_mpfs_disable(esw->dev); + mlx5_esw_fdb_drop_create(esw); + + mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { + if (!vport->adjacent) + continue; + esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n", + vport->vport); + + mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false); + } + + esw->offloads_inactive = true; + esw_warn(esw->dev, "MPFS/FDB inactive\n"); +} + static int esw_offloads_start(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { @@ -3438,6 +3564,10 @@ create_indir_err: static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) { + mlx5_esw_fdb_drop_destroy(esw); + if (esw->fdb_table.offloads.drop_root_fc) + mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc); + esw->fdb_table.offloads.drop_root_fc = NULL; esw_destroy_vport_rx_drop_rule(esw); esw_destroy_vport_rx_drop_group(esw); esw_destroy_vport_rx_group(esw); @@ -3556,10 +3686,11 @@ bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 cont int esw_offloads_enable(struct mlx5_eswitch *esw) { + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mapping_ctx *reg_c0_obj_pool; struct mlx5_vport *vport; unsigned long i; - u64 mapping_id; + u8 id_len; int err; mutex_init(&esw->offloads.termtbl_mutex); @@ -3581,9 +3712,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; - mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len); - reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_CHAIN, sizeof(struct mlx5_mapped_obj), ESW_REG_C0_USER_DATA_METADATA_MASK, true); @@ -3598,6 +3730,11 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_steering_init; + if (esw->offloads_inactive) + mlx5_esw_fdb_inactive(esw); + else + mlx5_esw_fdb_active(esw); + /* Representor will control the vport link state */ mlx5_esw_for_each_vf_vport(esw, i, vport, 
esw->esw_funcs.num_vfs) vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; @@ -3664,6 +3801,9 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_metadata_uninit(esw); mlx5_rdma_disable_roce(esw->dev); mlx5_esw_adjacent_vhcas_cleanup(esw); + /* must be done after vhcas cleanup to avoid adjacent vports connect */ + if (esw->offloads_inactive) + mlx5_esw_fdb_active(esw); /* legacy mode always active */ mutex_destroy(&esw->offloads.termtbl_mutex); } @@ -3674,6 +3814,7 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) *mlx5_mode = MLX5_ESWITCH_LEGACY; break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: + case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE: *mlx5_mode = MLX5_ESWITCH_OFFLOADS; break; default: @@ -3683,14 +3824,17 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) return 0; } -static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) +static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode) { - switch (mlx5_mode) { + switch (esw->mode) { case MLX5_ESWITCH_LEGACY: *mode = DEVLINK_ESWITCH_MODE_LEGACY; break; case MLX5_ESWITCH_OFFLOADS: - *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + if (esw->offloads_inactive) + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE; + else + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; break; default: return -EINVAL; @@ -3796,6 +3940,45 @@ static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink, return ret; } +/* Returns true when only changing between active and inactive switchdev mode */ +static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw, + u16 devlink_mode) +{ + /* current mode is not switchdev */ + if (esw->mode != MLX5_ESWITCH_OFFLOADS) + return false; + + /* new mode is not switchdev */ + if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV && + devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE) + return false; + + /* already inactive: no change in current state */ + if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE && + esw->offloads_inactive) + return false; + + /* already active: no change in current state */ + if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && + !esw->offloads_inactive) + return false; + + down_write(&esw->mode_lock); + esw->offloads_inactive = !esw->offloads_inactive; + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + + if (esw->offloads_inactive) + mlx5_esw_fdb_inactive(esw); + else + mlx5_esw_fdb_active(esw); + + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; + up_write(&esw->mode_lock); + return true; +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { @@ -3810,12 +3993,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) { + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured."); return -EPERM; } + /* Avoid try_lock, active/inactive mode change is not restricted */ + if (mlx5_devlink_switchdev_active_mode_change(esw, mode)) + return 0; + mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { @@ -3838,7 +4025,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, esw->eswitch_operation_in_progress = true; up_write(&esw->mode_lock); - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && + if (mlx5_mode == 
MLX5_ESWITCH_OFFLOADS && !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) { NL_SET_ERR_MSG_MOD(extack, "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); @@ -3846,25 +4033,27 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, goto skip; } - if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + if (mlx5_mode == MLX5_ESWITCH_LEGACY) esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; mlx5_eswitch_disable_locked(esw); - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode while devlink traps are active"); err = -EOPNOTSUPP; goto skip; } + esw->offloads_inactive = + (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE); err = esw_offloads_start(esw, extack); - } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { + } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) { err = esw_offloads_stop(esw, extack); } else { err = -EINVAL; } skip: - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && err) + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err) mlx5_devlink_netdev_netns_immutable_set(devlink, false); down_write(&esw->mode_lock); esw->eswitch_operation_in_progress = false; @@ -3883,7 +4072,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - return esw_mode_to_devlink(esw->mode, mode); + return esw_mode_to_devlink(esw, mode); } static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2db3ffb0a2b2..2ca3bddbdf05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -3520,6 +3520,11 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_DROP_ROOT, 1); + err = PTR_ERR_OR_ZERO(maj_prio); + if (err) + goto out_err; + err = create_fdb_bypass(steering); if (err) goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 79ae3a51a4b3..0a6003fe60e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,23 @@ static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); +int mlx5i_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(dev); + + return mlx5e_hwtstamp_set(epriv, config, extack); +} + +int mlx5i_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(dev); + + return mlx5e_hwtstamp_get(epriv, config); +} + static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, @@ -52,7 +69,8 @@ static const struct net_device_ops mlx5i_netdev_ops = { .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, - .ndo_eth_ioctl = mlx5i_ioctl, + .ndo_hwtstamp_get = mlx5i_hwtstamp_get, + .ndo_hwtstamp_set = mlx5i_hwtstamp_set, }; /* IPoIB mlx5 netdev profile */ @@ -316,7 +334,7 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev 
*mdev, u32 qpn) int mlx5i_update_nic_rx(struct mlx5e_priv *priv) { - return mlx5e_refresh_tirs(priv, true, true); + return mlx5e_refresh_tirs(priv->mdev, true, true); } int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn) @@ -409,6 +427,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + enum mlx5e_rx_res_features features; int err; priv->fs = mlx5e_fs_init(priv->profile, mdev, @@ -427,7 +446,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn, + features = MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK; + priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, + priv->drop_rq.rqn, &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (IS_ERR(priv->rx_res)) { @@ -557,20 +578,6 @@ int mlx5i_dev_init(struct net_device *dev) return 0; } -int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mlx5e_priv *priv = mlx5i_epriv(dev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx5e_hwstamp_set(priv, ifr); - case SIOCGHWTSTAMP: - return mlx5e_hwstamp_get(priv, ifr); - default: - return -EOPNOTSUPP; - } -} - void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 2ab6437a1c49..d67d5a72bb41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -88,7 +88,11 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); /* Shared ndo functions */ int mlx5i_dev_init(struct net_device *dev); void mlx5i_dev_cleanup(struct net_device *dev); -int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int mlx5i_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int mlx5i_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config); /* Parent profile functions */ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 028a76944d82..04444dad3a0d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -140,7 +140,6 @@ static int mlx5i_pkey_close(struct net_device *netdev); static int mlx5i_pkey_dev_init(struct net_device *dev); static void mlx5i_pkey_dev_cleanup(struct net_device *netdev); static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu); -static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, @@ -149,7 +148,8 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, - .ndo_eth_ioctl = mlx5i_pkey_ioctl, + .ndo_hwtstamp_get = mlx5i_hwtstamp_get, + .ndo_hwtstamp_set = mlx5i_hwtstamp_set, }; /* Child NDOs */ @@ -184,11 +184,6 @@ static int mlx5i_pkey_dev_init(struct net_device *dev) return mlx5i_dev_init(dev); } -static int mlx5i_pkey_ioctl(struct net_device *dev, struct 
ifreq *ifr, int cmd) -{ - return mlx5i_ioctl(dev, ifr, cmd); -} - static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) { mlx5i_parent_put(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 3db0387bf6dc..1ac933cd8f02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1418,10 +1418,12 @@ static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev) static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev) { struct mlx5_devcom_match_attr attr = { - .key.val = mlx5_query_nic_system_image_guid(dev), .flags = MLX5_DEVCOM_MATCH_FLAGS_NS, .net = mlx5_core_net(dev), }; + u8 len __always_unused; + + mlx5_query_nic_sw_system_image_guid(dev, attr.key.buf, &len); /* This component is use to sync adding core_dev to lag_dev and to sync * changes of mlx5_adev_devices between LAG layer and other layers. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 29e7fa09c32c..0ba0ef8bae42 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -1432,15 +1432,17 @@ static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared) return 0; } -static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key) +static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, + u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE]) { struct mlx5_core_dev *peer_dev, *next = NULL; - struct mlx5_devcom_match_attr attr = { - .key.val = key, - }; + struct mlx5_devcom_match_attr attr = {}; struct mlx5_devcom_comp_dev *compd; struct mlx5_devcom_comp_dev *pos; + BUILD_BUG_ON(MLX5_RT_CLOCK_IDENTITY_SIZE > MLX5_DEVCOM_MATCH_KEY_MAX); + memcpy(attr.key.buf, identity, MLX5_RT_CLOCK_IDENTITY_SIZE); + compd = mlx5_devcom_register_component(mdev->priv.devc, MLX5_DEVCOM_SHARED_CLOCK, &attr, NULL, mdev); @@ -1594,7 +1596,6 @@ int mlx5_init_clock(struct mlx5_core_dev *mdev) { u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE]; struct mlx5_clock_dev_state *clock_state; - u64 key; int err; if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { @@ -1610,12 +1611,10 @@ int mlx5_init_clock(struct mlx5_core_dev *mdev) mdev->clock_state = clock_state; if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) { - if (mlx5_clock_identity_get(mdev, identity)) { + if (mlx5_clock_identity_get(mdev, identity)) mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n"); - } else { - memcpy(&key, &identity, sizeof(key)); - mlx5_shared_clock_register(mdev, key); - } + else + mlx5_shared_clock_register(mdev, identity); } if (!mdev->clock) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index c18a652c0faa..aff3aed62c74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -54,7 +54,6 @@ struct mlx5_timer { struct mlx5_clock { seqlock_t lock; - struct hwtstamp_config hwtstamp_config; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 609c85f47917..91e5ae529d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -10,8 +10,10 @@ enum 
mlx5_devcom_match_flags { MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0), }; +#define MLX5_DEVCOM_MATCH_KEY_MAX 32 union mlx5_devcom_match_key { u64 val; + u8 buf[MLX5_DEVCOM_MATCH_KEY_MAX]; }; struct mlx5_devcom_match_attr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 4450091e181a..4a88a42ae4f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -65,13 +65,14 @@ static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) /* UC L2 table hash node */ struct l2table_node { struct l2addr_node node; - u32 index; /* index in HW l2 table */ + int index; /* index in HW l2 table */ int ref_count; }; struct mlx5_mpfs { struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE]; struct mutex lock; /* Synchronize l2 table access */ + bool enabled; u32 size; unsigned long *bitmap; }; @@ -114,6 +115,8 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) return -ENOMEM; } + mpfs->enabled = true; + dev->priv.mpfs = mpfs; return 0; } @@ -135,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) struct mlx5_mpfs *mpfs = dev->priv.mpfs; struct l2table_node *l2addr; int err = 0; - u32 index; + int index; if (!mpfs) return 0; @@ -148,30 +151,34 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) goto out; } - err = alloc_l2table_index(mpfs, &index); - if (err) - goto out; - l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL); if (!l2addr) { err = -ENOMEM; - goto hash_add_err; + goto out; } - err = set_l2table_entry_cmd(dev, index, mac); - if (err) - goto set_table_entry_err; + index = -1; + + if (mpfs->enabled) { + err = alloc_l2table_index(mpfs, &index); + if (err) + goto hash_del; + err = set_l2table_entry_cmd(dev, index, mac); + if (err) + goto free_l2table_index; + mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n", + l2addr->node.addr, index); + } l2addr->index = index; l2addr->ref_count = 1; mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index); goto out; - -set_table_entry_err: - l2addr_hash_del(l2addr); -hash_add_err: +free_l2table_index: free_l2table_index(mpfs, index); +hash_del: + l2addr_hash_del(l2addr); out: mutex_unlock(&mpfs->lock); return err; @@ -183,7 +190,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) struct mlx5_mpfs *mpfs = dev->priv.mpfs; struct l2table_node *l2addr; int err = 0; - u32 index; + int index; if (!mpfs) return 0; @@ -200,12 +207,87 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) goto unlock; index = l2addr->index; - del_l2table_entry_cmd(dev, index); + if (index >= 0) { + del_l2table_entry_cmd(dev, index); + free_l2table_index(mpfs, index); + mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n", + mac, index); + } l2addr_hash_del(l2addr); - free_l2table_index(mpfs, index); mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index); unlock: mutex_unlock(&mpfs->lock); return err; } EXPORT_SYMBOL(mlx5_mpfs_del_mac); + +int mlx5_mpfs_enable(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + struct hlist_node *n; + int err = 0, i; + + if (!mpfs) + return -ENODEV; + + mutex_lock(&mpfs->lock); + if (mpfs->enabled) + goto out; + mpfs->enabled = true; + mlx5_core_dbg(dev, "MPFS enabling mpfs\n"); + + mlx5_mpfs_foreach(l2addr, n, mpfs, i) { + u32 index; + + err = alloc_l2table_index(mpfs, &index); + if (err) { + mlx5_core_err(dev, "Failed to allocate MPFS index for %pM, err(%d)\n", + 
l2addr->node.addr, err); + goto out; + } + + err = set_l2table_entry_cmd(dev, index, l2addr->node.addr); + if (err) { + mlx5_core_err(dev, "Failed to set MPFS l2table entry for %pM index=%d, err(%d)\n", + l2addr->node.addr, index, err); + free_l2table_index(mpfs, index); + goto out; + } + + l2addr->index = index; + mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n", + l2addr->node.addr, l2addr->index); + } +out: + mutex_unlock(&mpfs->lock); + return err; +} + +void mlx5_mpfs_disable(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + struct hlist_node *n; + int i; + + if (!mpfs) + return; + + mutex_lock(&mpfs->lock); + if (!mpfs->enabled) + goto unlock; + mlx5_mpfs_foreach(l2addr, n, mpfs, i) { + if (l2addr->index < 0) + continue; + del_l2table_entry_cmd(dev, l2addr->index); + free_l2table_index(mpfs, l2addr->index); + mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n", + l2addr->node.addr, l2addr->index); + l2addr->index = -1; + } + mpfs->enabled = false; + mlx5_core_dbg(dev, "MPFS disabled\n"); +unlock: + mutex_unlock(&mpfs->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h index 4a293542a7aa..9c63838ce1f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -45,6 +45,10 @@ struct l2addr_node { u8 addr[ETH_ALEN]; }; +#define mlx5_mpfs_foreach(hs, tmp, mpfs, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hs, tmp, &(mpfs)->hash[i], node.hlist) + #define for_each_l2hash_node(hn, tmp, hash, i) \ for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist) @@ -82,11 +86,16 @@ struct l2addr_node { }) #ifdef CONFIG_MLX5_MPFS +struct mlx5_core_dev; int mlx5_mpfs_init(struct mlx5_core_dev *dev); void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_mpfs_enable(struct mlx5_core_dev *dev); +void mlx5_mpfs_disable(struct mlx5_core_dev *dev); #else /* #ifndef CONFIG_MLX5_MPFS */ static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} +static inline int mlx5_mpfs_enable(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_mpfs_disable(struct mlx5_core_dev *dev) {} #endif #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70c156591b0b..c904696cbc3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -553,6 +553,7 @@ EXPORT_SYMBOL(mlx5_is_roce_on); static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) { + bool do_set = false; void *set_hca_cap; int err; @@ -563,17 +564,27 @@ static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) if (err) return err; - if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) || - !(dev->priv.sw_vhca_id > 0)) - return 0; - set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur, MLX5_ST_SZ_BYTES(cmd_hca_cap_2)); - MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1); - return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2); + if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) && + dev->priv.sw_vhca_id > 0) { + MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1); + do_set = true; + } + + if (MLX5_CAP_GEN_2_MAX(dev, lag_per_mp_group)) { + MLX5_SET(cmd_hca_cap_2, 
set_hca_cap, lag_per_mp_group, 1); + do_set = true; + } + + /* Some FW versions support querying MLX5_CAP_GENERAL_2 + * capabilities but don't support setting them. + * Skip unnecessary update to hca_cap_2 when no changes were introduced. + */ + return do_set ? set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2) : 0; } static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 082259b56816..acef7d0ffa09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -444,6 +444,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev); void mlx5_uninit_one_light(struct mlx5_core_dev *dev); void mlx5_unload_one_light(struct mlx5_core_dev *dev); +void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf, + u8 *len); int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport, u16 opmod); #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 2ed2e530b07d..992873536c1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1190,6 +1190,25 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); +void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf, + u8 *len) +{ + u64 fw_system_image_guid; + + *len = 0; + + fw_system_image_guid = mlx5_query_nic_system_image_guid(mdev); + if (!fw_system_image_guid) + return; + + memcpy(buf, &fw_system_image_guid, sizeof(fw_system_image_guid)); + *len += sizeof(fw_system_image_guid); + + if (MLX5_CAP_GEN_2(mdev, load_balance_id) && + MLX5_CAP_GEN_2(mdev, lag_per_mp_group)) + buf[(*len)++] = MLX5_CAP_GEN_2(mdev, load_balance_id); +} + static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev, u16 vport_num, u16 *vhca_id) { diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index 3ba527514f1e..dff51f23d295 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -19,7 +19,7 @@ if NET_VENDOR_META config FBNIC tristate "Meta Platforms Host Network Interface" - depends on X86_64 || COMPILE_TEST + depends on 64BIT || COMPILE_TEST depends on !S390 depends on MAX_SKB_FRAGS < 22 depends on PCI_MSI diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index c87cb9ed09e7..1166fa17438d 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -878,11 +878,11 @@ msg_err: * @fbd: FBNIC device structure * @cmpl_data: Completion struct to store coredump * @offset: Offset into coredump requested - * @length: Length of section of cordeump to fetch + * @length: Length of section of coredump to fetch * * Return: zero on success, negative errno on failure * - * Asks the firmware to provide a section of the cordeump back in a message. + * Asks the firmware to provide a section of the coredump back in a message. * The response will have an offset and size matching the values provided. 
*/ int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd, @@ -1868,7 +1868,7 @@ int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd) if (err) goto free_message; - /* Send message of to FW notifying it of current RPC config */ + /* Send message off to FW notifying it of current RPC config */ err = fbnic_mbx_map_tlv_msg(fbd, msg); if (err) goto free_message; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index a7a6b4db8016..4620f1847f2e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -185,7 +185,7 @@ static void fbnic_health_check(struct fbnic_dev *fbd) { struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - /* As long as the heart is beating the FW is healty */ + /* As long as the heart is beating the FW is healthy */ if (fbd->fw_heartbeat_enabled) return; @@ -196,7 +196,7 @@ static void fbnic_health_check(struct fbnic_dev *fbd) if (tx_mbx->head != tx_mbx->tail) return; - fbnic_devlink_fw_report(fbd, "Firmware crashed detected!"); + fbnic_devlink_fw_report(fbd, "Firmware crash detected!"); fbnic_devlink_otp_check(fbd, "error detected after firmware recovery"); if (fbnic_fw_config_after_crash(fbd)) @@ -378,7 +378,7 @@ free_fbd: * @pdev: PCI device information struct * * Called by the PCI subsystem to alert the driver that it should release - * a PCI device. The could be caused by a Hot-Plug event, or because the + * a PCI device. This could be caused by a Hot-Plug event, or because the * driver is going to be removed from memory. **/ static void fbnic_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c index 39d99677b71e..db7748189f45 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_time.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c @@ -253,7 +253,7 @@ static void fbnic_ptp_reset(struct fbnic_dev *fbd) void fbnic_time_init(struct fbnic_net *fbn) { - /* This is not really a statistic, but the lockng primitive fits + /* This is not really a statistic, but the locking primitive fits * our usecase perfectly, we need an atomic 8 bytes READ_ONCE() / * WRITE_ONCE() behavior. 
*/ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h index c34bf87eeec9..3508b46ebdd0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h @@ -80,7 +80,7 @@ struct fbnic_tlv_index { enum fbnic_tlv_type type; }; -#define TLV_MAX_DATA (PAGE_SIZE - 512) +#define TLV_MAX_DATA ((PAGE_SIZE - 512) & 0xFFFF) #define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX #define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING } #define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index b1e8ce89870f..57e18a68f5d2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -887,6 +887,7 @@ static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem) *bdq_desc = cpu_to_le64(bd); bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) | FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1); + bdq_desc++; } while (--i); } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 40b1bfc600a7..582145713cfd 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -395,6 +395,8 @@ static int sparx5_create_port(struct sparx5 *sparx5, spx5_port->phylink = phylink; + spx5_port->ndev->dev.of_node = spx5_port->of_node; + return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 43f034e180c4..effe0a2f207a 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -528,6 +528,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_INIT_DONE: case GDMA_EQE_HWC_SOC_SERVICE: case GDMA_EQE_RNIC_QP_FATAL: + case GDMA_EQE_HWC_SOC_RECONFIG_DATA: if (!eq->eq.callback) break; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index ada6c78a2bef..aa4e2731e2ba 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -118,6 +118,7 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, struct gdma_dev *gd = hwc->gdma_dev; union hwc_init_type_data type_data; union hwc_init_eq_id_db eq_db; + struct mana_context *ac; u32 type, val; int ret; @@ -196,6 +197,17 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, hwc->hwc_timeout = val; break; + case HWC_DATA_HW_LINK_CONNECT: + case HWC_DATA_HW_LINK_DISCONNECT: + ac = gd->gdma_context->mana.driver_data; + if (!ac) + break; + + WRITE_ONCE(ac->link_event, type); + schedule_work(&ac->link_change_work); + + break; + default: dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type); break; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 0142fd98392c..cccd5b63cee6 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -20,6 +20,7 @@ #include <net/mana/mana.h> #include <net/mana/mana_auxiliary.h> +#include <net/mana/hw_channel.h> static DEFINE_IDA(mana_adev_ida); @@ -84,7 +85,6 @@ static int mana_open(struct net_device *ndev) /* Ensure port state updated before txq state */ smp_wmb(); - netif_carrier_on(ndev); netif_tx_wake_all_queues(ndev); netdev_dbg(ndev, 
"%s successful\n", __func__); return 0; @@ -100,6 +100,46 @@ static int mana_close(struct net_device *ndev) return mana_detach(ndev, true); } +static void mana_link_state_handle(struct work_struct *w) +{ + struct mana_context *ac; + struct net_device *ndev; + u32 link_event; + bool link_up; + int i; + + ac = container_of(w, struct mana_context, link_change_work); + + rtnl_lock(); + + link_event = READ_ONCE(ac->link_event); + + if (link_event == HWC_DATA_HW_LINK_CONNECT) + link_up = true; + else if (link_event == HWC_DATA_HW_LINK_DISCONNECT) + link_up = false; + else + goto out; + + /* Process all ports */ + for (i = 0; i < ac->num_ports; i++) { + ndev = ac->ports[i]; + if (!ndev) + continue; + + if (link_up) { + netif_carrier_on(ndev); + + __netdev_notify_peers(ndev); + } else { + netif_carrier_off(ndev); + } + } + +out: + rtnl_unlock(); +} + static bool mana_can_tx(struct gdma_queue *wq) { return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; @@ -814,7 +854,7 @@ static int mana_shaper_del(struct net_shaper_binding *binding, /* Reset mana port context parameters */ apc->handle.id = 0; apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; - apc->speed = 0; + apc->speed = apc->max_speed; } return err; @@ -3059,9 +3099,6 @@ int mana_attach(struct net_device *ndev) /* Ensure port state updated before txq state */ smp_wmb(); - if (apc->port_is_up) - netif_carrier_on(ndev); - netif_device_attach(ndev); return 0; @@ -3154,7 +3191,6 @@ int mana_detach(struct net_device *ndev, bool from_close) smp_wmb(); netif_tx_disable(ndev); - netif_carrier_off(ndev); if (apc->port_st_save) { err = mana_dealloc_queues(ndev); @@ -3243,6 +3279,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, goto free_indir; } + netif_carrier_on(ndev); + debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); return 0; @@ -3431,6 +3469,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming) if (!resuming) { ac->num_ports = num_ports; + + INIT_WORK(&ac->link_change_work, mana_link_state_handle); } else { if (ac->num_ports != num_ports) { dev_err(dev, "The number of vPorts changed: %d->%d\n", @@ -3438,6 +3478,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming) err = -EPROTO; goto out; } + + enable_work(&ac->link_change_work); } if (ac->num_ports == 0) @@ -3500,6 +3542,8 @@ void mana_remove(struct gdma_dev *gd, bool suspending) int err; int i; + disable_work_sync(&ac->link_change_work); + /* adev currently doesn't support suspending, always remove it */ if (gd->adev) remove_adev(gd); diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig new file mode 100644 index 000000000000..0b3e853d625f --- /dev/null +++ b/drivers/net/ethernet/mucse/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Mucse network device configuration +# + +config NET_VENDOR_MUCSE + bool "Mucse devices" + default y + help + If you have a network (Ethernet) card from Mucse(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Mucse(R) cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_MUCSE + +config MGBE + tristate "Mucse(R) 1GbE PCI Express adapters support" + depends on PCI + help + This driver supports Mucse(R) 1GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + <file:Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst>. 
+ + To compile this driver as a module, choose M here. The module + will be called rnpgbe. + +endif # NET_VENDOR_MUCSE + diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile new file mode 100644 index 000000000000..675173fa05f7 --- /dev/null +++ b/drivers/net/ethernet/mucse/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2020 - 2025 MUCSE Corporation. +# +# Makefile for the MUCSE(R) network device drivers +# + +obj-$(CONFIG_MGBE) += rnpgbe/ diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile new file mode 100644 index 000000000000..de8bcb7772ab --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2020 - 2025 MUCSE Corporation. +# +# Makefile for the MUCSE(R) 1GbE PCI Express ethernet driver +# + +obj-$(CONFIG_MGBE) += rnpgbe.o +rnpgbe-objs := rnpgbe_main.o\ + rnpgbe_chip.o\ + rnpgbe_mbx.o\ + rnpgbe_mbx_fw.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h new file mode 100644 index 000000000000..5b024f9f7e17 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_H +#define _RNPGBE_H + +#include <linux/types.h> +#include <linux/mutex.h> + +enum rnpgbe_boards { + board_n500, + board_n210 +}; + +struct mucse_mbx_info { + u32 timeout_us; + u32 delay_us; + u16 fw_req; + u16 fw_ack; + /* lock to serialize mbx access */ + struct mutex lock; + /* fw <--> pf mbx */ + u32 fwpf_shm_base; + u32 pf2fw_mbx_ctrl; + u32 fwpf_mbx_mask; + u32 fwpf_ctrl_base; +}; + +/* Enum for firmware notification modes; + * more modes (e.g., portup, link_report) will be added in the future + **/ +enum { + mucse_fw_powerup, +}; + +struct mucse_hw { + void __iomem *hw_addr; + struct pci_dev *pdev; + struct mucse_mbx_info mbx; + int port; + u8 pfvfnum; +}; + +struct mucse_stats { + u64 tx_dropped; +}; + +struct mucse { + struct net_device *netdev; + struct pci_dev *pdev; + struct mucse_hw hw; + struct mucse_stats stats; +}; + +int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr); +int rnpgbe_reset_hw(struct mucse_hw *hw); +int rnpgbe_send_notify(struct mucse_hw *hw, + bool enable, + int mode); +int rnpgbe_init_hw(struct mucse_hw *hw, int board_type); + +/* Device IDs */ +#define PCI_VENDOR_ID_MUCSE 0x8848 +#define RNPGBE_DEVICE_ID_N500_QUAD_PORT 0x8308 +#define RNPGBE_DEVICE_ID_N500_DUAL_PORT 0x8318 +#define RNPGBE_DEVICE_ID_N210 0x8208 +#define RNPGBE_DEVICE_ID_N210L 0x820a + +#define mucse_hw_wr32(hw, reg, val) \ + writel((val), (hw)->hw_addr + (reg)) +#endif /* _RNPGBE_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c new file mode 100644 index 000000000000..ebc7b3750157 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation.
*/ + +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> + +#include "rnpgbe.h" +#include "rnpgbe_hw.h" +#include "rnpgbe_mbx.h" +#include "rnpgbe_mbx_fw.h" + +/** + * rnpgbe_get_permanent_mac - Get permanent mac + * @hw: hw information structure + * @perm_addr: pointer to store perm_addr + * + * rnpgbe_get_permanent_mac tries to get the permanent MAC address from hw + * + * Return: 0 on success, negative errno on failure + **/ +int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr) +{ + struct device *dev = &hw->pdev->dev; + int err; + + err = mucse_mbx_get_macaddr(hw, hw->pfvfnum, perm_addr, hw->port); + if (err) { + dev_err(dev, "Failed to get MAC from FW %d\n", err); + return err; + } + + if (!is_valid_ether_addr(perm_addr)) { + dev_err(dev, "Failed to get valid MAC from FW\n"); + return -EINVAL; + } + + return 0; +} + +/** + * rnpgbe_reset_hw - Do a hardware reset + * @hw: hw information structure + * + * rnpgbe_reset_hw asks fw to do a hardware + * reset, and restores some regs to their defaults. + * + * Return: 0 on success, negative errno on failure + **/ +int rnpgbe_reset_hw(struct mucse_hw *hw) +{ + mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, 0); + return mucse_mbx_reset_hw(hw); +} + +/** + * rnpgbe_send_notify - Notify fw of a status change + * @hw: hw information structure + * @enable: true to enable the mode, false to disable it + * @mode: status mode + * + * Return: 0 on success, negative errno on failure + **/ +int rnpgbe_send_notify(struct mucse_hw *hw, + bool enable, + int mode) +{ + int err; + /* Keep the switch statement to support more modes in the future */ + switch (mode) { + case mucse_fw_powerup: + err = mucse_mbx_powerup(hw, enable); + break; + default: + err = -EINVAL; + } + + return err; +} + +/** + * rnpgbe_init_n500 - Setup n500 hw info + * @hw: hw information structure + * + * rnpgbe_init_n500 initializes all private + * structures for the n500 + **/ +static void rnpgbe_init_n500(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE; + mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE; +} + +/** + * rnpgbe_init_n210 - Setup n210 hw info + * @hw: hw information structure + * + * rnpgbe_init_n210 initializes all private + * structures for the n210 + **/ +static void rnpgbe_init_n210(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE; + mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE; +} + +/** + * rnpgbe_init_hw - Setup hw info according to board_type + * @hw: hw information structure + * @board_type: board type + * + * rnpgbe_init_hw initializes all hw data + * + * Return: 0 on success, -EINVAL on failure + **/ +int rnpgbe_init_hw(struct mucse_hw *hw, int board_type) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + hw->port = 0; + + mbx->pf2fw_mbx_ctrl = MUCSE_GBE_PFFW_MBX_CTRL_OFFSET; + mbx->fwpf_mbx_mask = MUCSE_GBE_FWPF_MBX_MASK_OFFSET; + + switch (board_type) { + case board_n500: + rnpgbe_init_n500(hw); + break; + case board_n210: + rnpgbe_init_n210(hw); + break; + default: + return -EINVAL; + } + /* init_params with mbx base */ + mucse_init_mbx_params_pf(hw); + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h new file mode 100644 index 000000000000..e77e6bc3d3e3 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation.
*/ + +#ifndef _RNPGBE_HW_H +#define _RNPGBE_HW_H + +#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00 +#define MUCSE_N500_FWPF_SHM_BASE 0x2d000 +#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500 +#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700 +#define MUCSE_N210_FWPF_CTRL_BASE 0x29400 +#define MUCSE_N210_FWPF_SHM_BASE 0x2d900 + +#define RNPGBE_DMA_AXI_EN 0x0010 + +#define RNPGBE_MAX_QUEUES 8 +#endif /* _RNPGBE_HW_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c new file mode 100644 index 000000000000..316f941629d4 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#include <linux/pci.h> +#include <net/rtnetlink.h> +#include <linux/etherdevice.h> + +#include "rnpgbe.h" +#include "rnpgbe_hw.h" +#include "rnpgbe_mbx_fw.h" + +static const char rnpgbe_driver_name[] = "rnpgbe"; + +/* rnpgbe_pci_tbl - PCI Device ID Table + * + * { PCI_VDEVICE(Vendor ID, Device ID), + * private_data (used for different hw chip) } + */ +static struct pci_device_id rnpgbe_pci_tbl[] = { + { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210), board_n210 }, + { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210L), board_n210 }, + { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_DUAL_PORT), board_n500 }, + { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_QUAD_PORT), board_n500 }, + /* required last entry */ + {0, }, +}; + +/** + * rnpgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). + * + * Return: 0 + **/ +static int rnpgbe_open(struct net_device *netdev) +{ + return 0; +} + +/** + * rnpgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * The close entry point is called when an interface is de-activated + * by the OS. + * + * Return: 0, this is not allowed to fail + **/ +static int rnpgbe_close(struct net_device *netdev) +{ + return 0; +} + +/** + * rnpgbe_xmit_frame - Send an skb to the driver + * @skb: skb structure to be sent + * @netdev: network interface device structure + * + * Return: NETDEV_TX_OK + **/ +static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct mucse *mucse = netdev_priv(netdev); + + dev_kfree_skb_any(skb); + mucse->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static const struct net_device_ops rnpgbe_netdev_ops = { + .ndo_open = rnpgbe_open, + .ndo_stop = rnpgbe_close, + .ndo_start_xmit = rnpgbe_xmit_frame, +}; + +/** + * rnpgbe_add_adapter - Add netdev for this pci_dev + * @pdev: PCI device information structure + * @board_type: board type + * + * rnpgbe_add_adapter initializes a netdev for this pci_dev + * structure. It maps the BAR, initializes the private + * structure, and performs a hardware reset.
+ + * + * Return: 0 on success, negative errno on failure + **/ +static int rnpgbe_add_adapter(struct pci_dev *pdev, + int board_type) +{ + struct net_device *netdev; + u8 perm_addr[ETH_ALEN]; + void __iomem *hw_addr; + struct mucse *mucse; + struct mucse_hw *hw; + int err, err_notify; + + netdev = alloc_etherdev_mq(sizeof(struct mucse), RNPGBE_MAX_QUEUES); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + mucse = netdev_priv(netdev); + mucse->netdev = netdev; + mucse->pdev = pdev; + pci_set_drvdata(pdev, mucse); + + hw = &mucse->hw; + hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!hw_addr) { + err = -EIO; + goto err_free_net; + } + + hw->hw_addr = hw_addr; + hw->pdev = pdev; + + err = rnpgbe_init_hw(hw, board_type); + if (err) { + dev_err(&pdev->dev, "Init hw err %d\n", err); + goto err_free_net; + } + /* Step 1: Send a power-up notification to firmware (no response expected). + * This informs firmware to initialize the hardware power state, but + * firmware only acknowledges receipt without returning data. Must be + * done before synchronization as firmware may be in a low-power idle + * state initially. + */ + err_notify = rnpgbe_send_notify(hw, true, mucse_fw_powerup); + if (err_notify) { + dev_warn(&pdev->dev, "Send powerup to hw failed %d\n", + err_notify); + dev_warn(&pdev->dev, "Performance may be degraded\n"); + } + /* Step 2: Synchronize mailbox communication with firmware (requires a + * response). After power-up, confirm firmware is ready to process + * requests with responses. This ensures subsequent request/response + * interactions work reliably. + */ + err = mucse_mbx_sync_fw(hw); + if (err) { + dev_err(&pdev->dev, "Sync fw failed! %d\n", err); + goto err_powerdown; + } + + netdev->netdev_ops = &rnpgbe_netdev_ops; + err = rnpgbe_reset_hw(hw); + if (err) { + dev_err(&pdev->dev, "Hw reset failed %d\n", err); + goto err_powerdown; + } + + err = rnpgbe_get_permanent_mac(hw, perm_addr); + if (!err) { + eth_hw_addr_set(netdev, perm_addr); + } else if (err == -EINVAL) { + dev_warn(&pdev->dev, "Using random MAC\n"); + eth_hw_addr_random(netdev); + } else if (err) { + dev_err(&pdev->dev, "get perm_addr failed %d\n", err); + goto err_powerdown; + } + + err = register_netdev(netdev); + if (err) + goto err_powerdown; + + return 0; +err_powerdown: + /* notify powerdown only if powerup succeeded */ + if (!err_notify) { + err_notify = rnpgbe_send_notify(hw, false, mucse_fw_powerup); + if (err_notify) + dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", + err_notify); + } +err_free_net: + free_netdev(netdev); + return err; +} + +/** + * rnpgbe_probe - Device initialization routine + * @pdev: PCI device information struct + * @id: entry in rnpgbe_pci_tbl + * + * rnpgbe_probe initializes a PF adapter identified by a pci_dev + * structure.
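/* [Editor's note] The probe path above enforces a strict bring-up order:
 * a fire-and-forget power-up notification, then a request/response
 * mailbox sync, then the hardware reset and MAC query; on failure the
 * power-up is paired with a power-down only if the notification was
 * delivered. A condensed sketch of that control flow, reusing the
 * patch's functions (demo_bringup itself is illustrative, not part of
 * the driver):
 */
static int demo_bringup(struct mucse_hw *hw)
{
	int err, err_notify;

	/* 1) Power-up notify: fw only acks receipt, failure is non-fatal. */
	err_notify = rnpgbe_send_notify(hw, true, mucse_fw_powerup);

	/* 2) Request/response sync must succeed before any further
	 * mailbox traffic is attempted.
	 */
	err = mucse_mbx_sync_fw(hw);
	if (err)
		goto powerdown;

	/* 3) Only now is it safe to issue commands such as the reset. */
	err = rnpgbe_reset_hw(hw);
	if (err)
		goto powerdown;

	return 0;
powerdown:
	/* Pair the power-up only if it was actually delivered. */
	if (!err_notify)
		rnpgbe_send_notify(hw, false, mucse_fw_powerup);
	return err;
}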
+ + * + * Return: 0 on success, negative errno on failure + **/ +static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int board_type = id->driver_data; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting %d\n", err); + goto err_disable_dev; + } + + err = pci_request_mem_regions(pdev, rnpgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed %d\n", err); + goto err_disable_dev; + } + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) { + dev_err(&pdev->dev, "pci_save_state failed %d\n", err); + goto err_free_regions; + } + + err = rnpgbe_add_adapter(pdev, board_type); + if (err) + goto err_free_regions; + + return 0; +err_free_regions: + pci_release_mem_regions(pdev); +err_disable_dev: + pci_disable_device(pdev); + return err; +} + +/** + * rnpgbe_rm_adapter - Remove netdev for this mucse structure + * @pdev: PCI device information struct + * + * rnpgbe_rm_adapter removes the netdev for this mucse structure + **/ +static void rnpgbe_rm_adapter(struct pci_dev *pdev) +{ + struct mucse *mucse = pci_get_drvdata(pdev); + struct net_device *netdev; + struct mucse_hw *hw; + int err; + + if (!mucse) + return; + hw = &mucse->hw; + netdev = mucse->netdev; + unregister_netdev(netdev); + err = rnpgbe_send_notify(hw, false, mucse_fw_powerup); + if (err) + dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err); + free_netdev(netdev); +} + +/** + * rnpgbe_remove - Device removal routine + * @pdev: PCI device information struct + * + * rnpgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void rnpgbe_remove(struct pci_dev *pdev) +{ + rnpgbe_rm_adapter(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +/** + * rnpgbe_dev_shutdown - Device shutdown routine + * @pdev: PCI device information struct + **/ +static void rnpgbe_dev_shutdown(struct pci_dev *pdev) +{ + struct mucse *mucse = pci_get_drvdata(pdev); + struct net_device *netdev = mucse->netdev; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) + rnpgbe_close(netdev); + rtnl_unlock(); + pci_disable_device(pdev); +} + +/** + * rnpgbe_shutdown - Device shutdown routine + * @pdev: PCI device information struct + * + * rnpgbe_shutdown is called by the PCI subsystem to alert the driver + * that the OS is shutting down. The device should set up its wakeup + * state here. + **/ +static void rnpgbe_shutdown(struct pci_dev *pdev) +{ + rnpgbe_dev_shutdown(pdev); +} + +static struct pci_driver rnpgbe_driver = { + .name = rnpgbe_driver_name, + .id_table = rnpgbe_pci_tbl, + .probe = rnpgbe_probe, + .remove = rnpgbe_remove, + .shutdown = rnpgbe_shutdown, +}; + +module_pci_driver(rnpgbe_driver); + +MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl); +MODULE_AUTHOR("Yibo Dong, <dong100@mucse.com>"); +MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c new file mode 100644 index 000000000000..de5e29230b3c --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2025 Mucse Corporation.
*/ + +#include <linux/errno.h> +#include <linux/bitfield.h> +#include <linux/iopoll.h> + +#include "rnpgbe_mbx.h" + +/** + * mbx_data_rd32 - Reads reg with base mbx->fwpf_shm_base + * @mbx: pointer to the MBX structure + * @reg: register offset + * + * Return: register value + **/ +static u32 mbx_data_rd32(struct mucse_mbx_info *mbx, u32 reg) +{ + struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx); + + return readl(hw->hw_addr + mbx->fwpf_shm_base + reg); +} + +/** + * mbx_data_wr32 - Writes value to reg with base mbx->fwpf_shm_base + * @mbx: pointer to the MBX structure + * @reg: register offset + * @value: value to be written + * + **/ +static void mbx_data_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value) +{ + struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx); + + writel(value, hw->hw_addr + mbx->fwpf_shm_base + reg); +} + +/** + * mbx_ctrl_rd32 - Reads reg with base mbx->fwpf_ctrl_base + * @mbx: pointer to the MBX structure + * @reg: register offset + * + * Return: register value + **/ +static u32 mbx_ctrl_rd32(struct mucse_mbx_info *mbx, u32 reg) +{ + struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx); + + return readl(hw->hw_addr + mbx->fwpf_ctrl_base + reg); +} + +/** + * mbx_ctrl_wr32 - Writes value to reg with base mbx->fwpf_ctrl_base + * @mbx: pointer to the MBX structure + * @reg: register offset + * @value: value to be written + * + **/ +static void mbx_ctrl_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value) +{ + struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx); + + writel(value, hw->hw_addr + mbx->fwpf_ctrl_base + reg); +} + +/** + * mucse_mbx_get_lock_pf - Write ctrl and read back lock status + * @hw: pointer to the HW structure + * + * Return: register value after write + **/ +static u32 mucse_mbx_get_lock_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx); + + mbx_ctrl_wr32(mbx, reg, MUCSE_MBX_PFU); + + return mbx_ctrl_rd32(mbx, reg); +} + +/** + * mucse_obtain_mbx_lock_pf - Obtain mailbox lock + * @hw: pointer to the HW structure + * + * Pairs with mucse_release_mbx_lock_pf(). + * This function may be used in an irq handler. + * + * Return: 0 on success, negative errno on failure + **/ +static int mucse_obtain_mbx_lock_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 val; + + return read_poll_timeout_atomic(mucse_mbx_get_lock_pf, + val, val & MUCSE_MBX_PFU, + mbx->delay_us, + mbx->timeout_us, + false, hw); +} + +/** + * mucse_release_mbx_lock_pf - Release mailbox lock + * @hw: pointer to the HW structure + * @req: send a request or not + * + * Pairs with mucse_obtain_mbx_lock_pf(): + * - Releases the mailbox lock by clearing MUCSE_MBX_PFU bit + * - Simultaneously sends the request by setting MUCSE_MBX_REQ bit + * if req is true + * (Both bits are in the same mailbox control register, + * so operations are combined) + **/ +static void mucse_release_mbx_lock_pf(struct mucse_hw *hw, bool req) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx); + + mbx_ctrl_wr32(mbx, reg, req ?
MUCSE_MBX_REQ : 0); +} + +/** + * mucse_mbx_get_fwreq - Read fw req from reg + * @mbx: pointer to the mbx structure + * + * Return: the fwreq value + **/ +static u16 mucse_mbx_get_fwreq(struct mucse_mbx_info *mbx) +{ + u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT); + + return FIELD_GET(GENMASK_U32(15, 0), val); +} + +/** + * mucse_mbx_inc_pf_ack - Increase ack + * @hw: pointer to the HW structure + * + * mucse_mbx_inc_pf_ack reads pf_ack from hw, then writes + * the incremented value back + **/ +static void mucse_mbx_inc_pf_ack(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u16 ack; + u32 val; + + val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT); + ack = FIELD_GET(GENMASK_U32(31, 16), val); + ack++; + val &= ~GENMASK_U32(31, 16); + val |= FIELD_PREP(GENMASK_U32(31, 16), ack); + mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val); +} + +/** + * mucse_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: the message buffer + * @size: length of buffer + * + * mucse_read_mbx_pf copies a message from the mbx buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a fw request so no polling for message is needed. + * + * Return: 0 on success, negative errno on failure + **/ +static int mucse_read_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size) +{ + const int size_in_words = size / sizeof(u32); + struct mucse_mbx_info *mbx = &hw->mbx; + int err; + + err = mucse_obtain_mbx_lock_pf(hw); + if (err) + return err; + + for (int i = 0; i < size_in_words; i++) + msg[i] = mbx_data_rd32(mbx, MUCSE_MBX_FWPF_SHM + 4 * i); + /* Hw requires the data reg to be written last */ + mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM, 0); + /* flush reqs as we have read this request data */ + hw->mbx.fw_req = mucse_mbx_get_fwreq(mbx); + mucse_mbx_inc_pf_ack(hw); + mucse_release_mbx_lock_pf(hw, false); + + return 0; +} + +/** + * mucse_check_for_msg_pf - Check to see if the fw has sent mail + * @hw: pointer to the HW structure + * + * Return: 0 if the fw has set the Status bit or else -EIO + **/ +static int mucse_check_for_msg_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u16 fw_req; + + fw_req = mucse_mbx_get_fwreq(mbx); + /* The chip's register is reset to 0 when the RC sends a reset + * mbx command. Return -EIO in that state; otherwise + * fw_req == hw->mbx.fw_req means no new msg.
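/* [Editor's note] The mailbox bookkeeping above packs two 16-bit
 * counters (request and ack) into one 32-bit shared-memory word, and
 * "new mail" is detected by comparing the fw-side counter against the
 * last value cached in hw->mbx. A small self-contained sketch of the
 * pack/unpack idiom with FIELD_GET()/FIELD_PREP(); the DEMO_* masks and
 * helpers are illustrative (the patch itself uses GENMASK_U32() for the
 * same layout):
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_REQ_MASK	GENMASK(15, 0)	/* low half: request counter */
#define DEMO_ACK_MASK	GENMASK(31, 16)	/* high half: ack counter */

/* Pull both counters out of one register-sized word. */
static void demo_unpack(u32 word, u16 *req, u16 *ack)
{
	*req = FIELD_GET(DEMO_REQ_MASK, word);
	*ack = FIELD_GET(DEMO_ACK_MASK, word);
}

/* Bump only the ack half, preserving the request half, as
 * mucse_mbx_inc_pf_ack() above does before writing the word back.
 */
static u32 demo_inc_ack(u32 word)
{
	u16 ack = FIELD_GET(DEMO_ACK_MASK, word) + 1;

	word &= ~DEMO_ACK_MASK;
	return word | FIELD_PREP(DEMO_ACK_MASK, ack);
}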
+ **/ + if (fw_req == 0 || fw_req == hw->mbx.fw_req) + return -EIO; + + return 0; +} + +/** + * mucse_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * Return: 0 on success, negative errno on failure + **/ +static int mucse_poll_for_msg(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int val; + + return read_poll_timeout(mucse_check_for_msg_pf, + val, !val, mbx->delay_us, + mbx->timeout_us, + false, hw); +} + +/** + * mucse_poll_and_read_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: the message buffer + * @size: length of buffer + * + * Return: 0 if it successfully received a message notification and + * copied it into the receive buffer, negative errno on failure + **/ +int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size) +{ + int err; + + err = mucse_poll_for_msg(hw); + if (err) + return err; + + return mucse_read_mbx_pf(hw, msg, size); +} + +/** + * mucse_mbx_get_fwack - Read fw ack from reg + * @mbx: pointer to the MBX structure + * + * Return: the fwack value + **/ +static u16 mucse_mbx_get_fwack(struct mucse_mbx_info *mbx) +{ + u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT); + + return FIELD_GET(GENMASK_U32(31, 16), val); +} + +/** + * mucse_mbx_inc_pf_req - Increase req + * @hw: pointer to the HW structure + * + * mucse_mbx_inc_pf_req reads pf_req from hw, then writes + * the incremented value back + **/ +static void mucse_mbx_inc_pf_req(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u16 req; + u32 val; + + val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT); + req = FIELD_GET(GENMASK_U32(15, 0), val); + req++; + val &= ~GENMASK_U32(15, 0); + val |= FIELD_PREP(GENMASK_U32(15, 0), req); + mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val); +} + +/** + * mucse_write_mbx_pf - Place a message in the mailbox + * @hw: pointer to the HW structure + * @msg: the message buffer + * @size: length of buffer + * + * Return: 0 if it successfully copied the message into the buffer, + * negative errno on failure + **/ +static int mucse_write_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size) +{ + const int size_in_words = size / sizeof(u32); + struct mucse_mbx_info *mbx = &hw->mbx; + int err; + + err = mucse_obtain_mbx_lock_pf(hw); + if (err) + return err; + + for (int i = 0; i < size_in_words; i++) + mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM + i * 4, msg[i]); + + /* flush acks as we are overwriting the message buffer */ + hw->mbx.fw_ack = mucse_mbx_get_fwack(mbx); + mucse_mbx_inc_pf_req(hw); + mucse_release_mbx_lock_pf(hw, true); + + return 0; +} + +/** + * mucse_check_for_ack_pf - Check to see if the fw has ACKed + * @hw: pointer to the HW structure + * + * Return: 0 if the fw has set the Status bit or else -EIO + **/ +static int mucse_check_for_ack_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u16 fw_ack; + + fw_ack = mucse_mbx_get_fwack(mbx); + /* The chip's register is reset to 0 when the RC sends a reset + * mbx command. Return -EIO in that state; otherwise + * fw_ack == hw->mbx.fw_ack means no new ack.
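/* [Editor's note] Both pollers above lean on read_poll_timeout(): it
 * repeatedly invokes the given function, stores its return value in
 * 'val', and stops once the condition expression becomes true or the
 * timeout expires, returning 0 or -ETIMEDOUT. A minimal usage sketch;
 * demo_status() and demo_wait_ready() are hypothetical, not part of the
 * driver:
 */
#include <linux/iopoll.h>

static int demo_status(struct mucse_hw *hw)
{
	return 0;	/* pretend the device is immediately ready */
}

static int demo_wait_ready(struct mucse_hw *hw)
{
	int ret;

	/* Poll every 100us for up to one second; trailing arguments are
	 * forwarded to demo_status(). The *_atomic variant used by
	 * mucse_obtain_mbx_lock_pf() busy-waits instead of sleeping and
	 * is therefore safe in IRQ context.
	 */
	return read_poll_timeout(demo_status, ret, !ret,
				 100, USEC_PER_SEC, false, hw);
}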
+ **/ + if (fw_ack == 0 || fw_ack == hw->mbx.fw_ack) + return -EIO; + + return 0; +} + +/** + * mucse_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * + * Return: 0 if it successfully received a message acknowledgment, + * else negative errno + **/ +static int mucse_poll_for_ack(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int val; + + return read_poll_timeout(mucse_check_for_ack_pf, + val, !val, mbx->delay_us, + mbx->timeout_us, + false, hw); +} + +/** + * mucse_write_and_wait_ack_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: the message buffer + * @size: length of buffer + * + * Return: 0 if it successfully copied the message into the buffer and + * received an ack to that message within the polling timeout + **/ +int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size) +{ + int err; + + err = mucse_write_mbx_pf(hw, msg, size); + if (err) + return err; + + return mucse_poll_for_ack(hw); +} + +/** + * mucse_mbx_reset - Reset mbx info, sync info from regs + * @hw: pointer to the HW structure + * + * mucse_mbx_reset resets all mbx variables to their defaults. + **/ +static void mucse_mbx_reset(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 val; + + val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT); + hw->mbx.fw_req = FIELD_GET(GENMASK_U32(15, 0), val); + hw->mbx.fw_ack = FIELD_GET(GENMASK_U32(31, 16), val); + mbx_ctrl_wr32(mbx, MUCSE_MBX_PF2FW_CTRL(mbx), 0); + mbx_ctrl_wr32(mbx, MUCSE_MBX_FWPF_MASK(mbx), GENMASK_U32(31, 16)); +} + +/** + * mucse_init_mbx_params_pf - Set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void mucse_init_mbx_params_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + mbx->delay_us = 100; + mbx->timeout_us = 4 * USEC_PER_SEC; + mutex_init(&mbx->lock); + mucse_mbx_reset(hw); +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h new file mode 100644 index 000000000000..e6fcc8d1d3ca --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_MBX_H +#define _RNPGBE_MBX_H + +#include "rnpgbe.h" + +#define MUCSE_MBX_FW2PF_CNT 0 +#define MUCSE_MBX_PF2FW_CNT 4 +#define MUCSE_MBX_FWPF_SHM 8 +#define MUCSE_MBX_PF2FW_CTRL(mbx) ((mbx)->pf2fw_mbx_ctrl) +#define MUCSE_MBX_FWPF_MASK(mbx) ((mbx)->fwpf_mbx_mask) +#define MUCSE_MBX_REQ BIT(0) /* Post a request to the mailbox */ +#define MUCSE_MBX_PFU BIT(3) /* PF owns the mailbox buffer */ + +int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size); +void mucse_init_mbx_params_pf(struct mucse_hw *hw); +int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size); +#endif /* _RNPGBE_MBX_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c new file mode 100644 index 000000000000..8c8bd5e8e1db --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation.
*/ + +#include <linux/if_ether.h> +#include <linux/bitfield.h> + +#include "rnpgbe.h" +#include "rnpgbe_mbx.h" +#include "rnpgbe_mbx_fw.h" + +/** + * mucse_fw_send_cmd_wait_resp - Send cmd req and wait for response + * @hw: pointer to the HW structure + * @req: pointer to the cmd req structure + * @reply: pointer to the fw reply structure + * + * mucse_fw_send_cmd_wait_resp sends req to the pf-fw mailbox and waits + * for a reply from fw. + * + * Return: 0 on success, negative errno on failure + **/ +static int mucse_fw_send_cmd_wait_resp(struct mucse_hw *hw, + struct mbx_fw_cmd_req *req, + struct mbx_fw_cmd_reply *reply) +{ + int len = le16_to_cpu(req->datalen); + int retry_cnt = 3; + int err; + + mutex_lock(&hw->mbx.lock); + err = mucse_write_and_wait_ack_mbx(hw, (u32 *)req, len); + if (err) + goto out; + do { + err = mucse_poll_and_read_mbx(hw, (u32 *)reply, + sizeof(*reply)); + if (err) + goto out; + /* mucse_write_and_wait_ack_mbx() returning 0 means fw has + * received the request; wait for a reply carrying the + * expected opcode, retrying up to 'retry_cnt' times. + */ + } while (--retry_cnt >= 0 && reply->opcode != req->opcode); +out: + mutex_unlock(&hw->mbx.lock); + if (!err && retry_cnt < 0) + return -ETIMEDOUT; + if (!err && reply->error_code) + return -EIO; + + return err; +} + +/** + * mucse_mbx_get_info - Get hw info from fw + * @hw: pointer to the HW structure + * + * mucse_mbx_get_info tries to get hw info from fw. + * + * Return: 0 on success, negative errno on failure + **/ +static int mucse_mbx_get_info(struct mucse_hw *hw) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(GET_HW_INFO), + }; + struct mbx_fw_cmd_reply reply = {}; + int err; + + err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply); + if (!err) + hw->pfvfnum = FIELD_GET(GENMASK_U16(7, 0), + le16_to_cpu(reply.hw_info.pfnum)); + + return err; +} + +/** + * mucse_mbx_sync_fw - Try to sync with fw + * @hw: pointer to the HW structure + * + * mucse_mbx_sync_fw tries to sync with fw. It is only called from + * probe; if it fails, the network device is not registered. + * The sync is retried several times. + * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_sync_fw(struct mucse_hw *hw) +{ + int try_cnt = 3; + int err; + + do { + err = mucse_mbx_get_info(hw); + } while (err == -ETIMEDOUT && try_cnt--); + + return err; +} + +/** + * mucse_mbx_powerup - Notify fw of powerup state + * @hw: pointer to the HW structure + * @is_powerup: true for powerup, false for powerdown + * + * mucse_mbx_powerup tells fw to restore its normal working frequency + * when is_powerup is true, and to reduce the working frequency + * when false. + * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(sizeof(req.powerup) + + MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(POWER_UP), + .powerup = { + /* fw needs this to reply to the correct cmd */ + .version = cpu_to_le32(GENMASK_U32(31, 0)), + .status = cpu_to_le32(is_powerup ? 1 : 0), + }, + }; + int len, err; + + len = le16_to_cpu(req.datalen); + mutex_lock(&hw->mbx.lock); + err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len); + mutex_unlock(&hw->mbx.lock); + + return err; +} + +/** + * mucse_mbx_reset_hw - Posts a mbx req to reset hw + * @hw: pointer to the HW structure + * + * mucse_mbx_reset_hw posts a mbx req to firmware to reset hw. + * We use mucse_fw_send_cmd_wait_resp() to wait until the hw reset completes.
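/* [Editor's note] mucse_fw_send_cmd_wait_resp() above implements the
 * request/response half of the protocol: write the request, then
 * re-read the mailbox a bounded number of times until a reply carrying
 * the request's opcode shows up, and map a firmware error_code to -EIO.
 * A condensed sketch of that loop (demo_transact is illustrative, and
 * locking is omitted; the real function holds hw->mbx.lock around the
 * whole transaction):
 */
static int demo_transact(struct mucse_hw *hw,
			 struct mbx_fw_cmd_req *req,
			 struct mbx_fw_cmd_reply *reply)
{
	int retry = 3, err;

	err = mucse_write_and_wait_ack_mbx(hw, (u32 *)req,
					   le16_to_cpu(req->datalen));
	if (err)
		return err;

	do {
		err = mucse_poll_and_read_mbx(hw, (u32 *)reply,
					      sizeof(*reply));
		if (err)
			return err;
		/* Unrelated replies are discarded until ours arrives. */
	} while (--retry >= 0 && reply->opcode != req->opcode);

	if (retry < 0)
		return -ETIMEDOUT;	/* never saw the expected opcode */

	return reply->error_code ? -EIO : 0;
}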
+ * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_reset_hw(struct mucse_hw *hw) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(RESET_HW), + }; + struct mbx_fw_cmd_reply reply = {}; + + return mucse_fw_send_cmd_wait_resp(hw, &req, &reply); +} + +/** + * mucse_mbx_get_macaddr - Posts a mbx req to request macaddr + * @hw: pointer to the HW structure + * @pfvfnum: index of pf/vf num + * @mac_addr: pointer to store mac_addr + * @port: port index + * + * mucse_mbx_get_macaddr posts a mbx req to firmware to get mac_addr. + * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum, + u8 *mac_addr, + int port) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(sizeof(req.get_mac_addr) + + MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(GET_MAC_ADDRESS), + .get_mac_addr = { + .port_mask = cpu_to_le32(BIT(port)), + .pfvf_num = cpu_to_le32(pfvfnum), + }, + }; + struct mbx_fw_cmd_reply reply = {}; + int err; + + err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply); + if (err) + return err; + + if (le32_to_cpu(reply.mac_addr.ports) & BIT(port)) + memcpy(mac_addr, reply.mac_addr.addrs[port].mac, ETH_ALEN); + else + return -ENODATA; + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h new file mode 100644 index 000000000000..fb24fc12b613 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_MBX_FW_H +#define _RNPGBE_MBX_FW_H + +#include <linux/types.h> + +#include "rnpgbe.h" + +#define MUCSE_MBX_REQ_HDR_LEN 24 + +enum MUCSE_FW_CMD { + GET_HW_INFO = 0x0601, + GET_MAC_ADDRESS = 0x0602, + RESET_HW = 0x0603, + POWER_UP = 0x0803, +}; + +struct mucse_hw_info { + u8 link_stat; + u8 port_mask; + __le32 speed; + __le16 phy_type; + __le16 nic_mode; + __le16 pfnum; + __le32 fw_version; + __le32 axi_mhz; + union { + u8 port_id[4]; + __le32 port_ids; + }; + __le32 bd_uid; + __le32 phy_id; + __le32 wol_status; + __le32 ext_info; +} __packed; + +struct mbx_fw_cmd_req { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 ret_value; + __le32 cookie_lo; + __le32 cookie_hi; + __le32 reply_lo; + __le32 reply_hi; + union { + u8 data[32]; + struct { + __le32 version; + __le32 status; + } powerup; + struct { + __le32 port_mask; + __le32 pfvf_num; + } get_mac_addr; + }; +} __packed; + +struct mbx_fw_cmd_reply { + __le16 flags; + __le16 opcode; + __le16 error_code; + __le16 datalen; + __le32 cookie_lo; + __le32 cookie_hi; + union { + u8 data[40]; + struct mac_addr { + __le32 ports; + struct _addr { + /* for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + u8 mac[8]; + } addrs[4]; + } mac_addr; + struct mucse_hw_info hw_info; + }; +} __packed; + +int mucse_mbx_sync_fw(struct mucse_hw *hw); +int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup); +int mucse_mbx_reset_hw(struct mucse_hw *hw); +int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum, + u8 *mac_addr, int port); +#endif /* _RNPGBE_MBX_FW_H */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index e5a6f59af0b6..62f05f4569b1 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -198,23 +198,21 @@ 
pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED); } -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int pch_gbe_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct hwtstamp_config cfg; struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev; u8 station[20]; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) return -ERANGE; - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: adapter->hwts_rx_en = 0; break; @@ -223,17 +221,17 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0); break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); strcpy(station, PTP_L4_MULTICAST_SA); pch_set_station_address(station, pdev); break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); strcpy(station, PTP_L2_MULTICAST_SA); pch_set_station_address(station, pdev); @@ -242,12 +240,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } - adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + adapter->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON; /* Clear out any old time stamps. */ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; +} + +static int pch_gbe_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + cfg->tx_type = adapter->hwts_tx_en ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = adapter->hwts_rx_en; + + return 0; } static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) @@ -2234,9 +2243,6 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) netdev_dbg(netdev, "cmd : 0x%04x\n", cmd); - if (cmd == SIOCSHWTSTAMP) - return hwtstamp_ioctl(netdev, ifr, cmd); - return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); } @@ -2328,6 +2334,8 @@ static const struct net_device_ops pch_gbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pch_gbe_netpoll, #endif + .ndo_hwtstamp_get = pch_gbe_hwtstamp_get, + .ndo_hwtstamp_set = pch_gbe_hwtstamp_set, }; static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index b28966ae50c2..058eea86e141 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2335,20 +2335,6 @@ static int ionic_stop(struct net_device *netdev) return 0; } -static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct ionic_lif *lif = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return ionic_lif_hwstamp_set(lif, ifr); - case SIOCGHWTSTAMP: - return ionic_lif_hwstamp_get(lif, ifr); - default: - return -EOPNOTSUPP; - } -} - static int ionic_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivf) { @@ -2812,7 +2798,6 @@ static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, - .ndo_eth_ioctl = ionic_eth_ioctl, .ndo_start_xmit = ionic_start_xmit, .ndo_bpf = ionic_xdp, .ndo_xdp_xmit = ionic_xdp_xmit, @@ -2833,6 +2818,8 @@ static const struct net_device_ops ionic_netdev_ops = { .ndo_get_vf_config = ionic_get_vf_config, .ndo_set_vf_link_state = ionic_set_vf_link_state, .ndo_get_vf_stats = ionic_get_vf_stats, + .ndo_hwtstamp_get = ionic_hwstamp_get, + .ndo_hwtstamp_set = ionic_hwstamp_set, }; static int ionic_cmb_reconfig(struct ionic_lif *lif, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 43bdd0fb8733..8e10f66dc50e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -6,7 +6,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> -#include <uapi/linux/net_tstamp.h> +#include <linux/net_tstamp.h> #include <linux/dim.h> #include <linux/pci.h> #include "ionic_rx_filter.h" @@ -254,7 +254,7 @@ struct ionic_phc { struct timecounter tc; struct mutex config_lock; /* lock for ts_config */ - struct hwtstamp_config ts_config; + struct kernel_hwtstamp_config ts_config; u64 ts_config_rx_filt; u32 ts_config_tx_mode; @@ -362,8 +362,11 @@ int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void ionic_lif_hwstamp_replay(struct ionic_lif *lif); void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); -int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); -int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); +int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 
counter); void ionic_lif_register_phc(struct ionic_lif *lif); void ionic_lif_unregister_phc(struct ionic_lif *lif); @@ -373,12 +376,15 @@ void ionic_lif_free_phc(struct ionic_lif *lif); static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} -static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +static inline int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } -static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +static inline int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index 9f5c81d44f99..05b44fc482f8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -65,11 +65,12 @@ static u64 ionic_hwstamp_rx_filt(int config_rx_filter) } static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, - struct hwtstamp_config *new_ts) + struct kernel_hwtstamp_config *new_ts, + struct netlink_ext_ack *extack) { + struct kernel_hwtstamp_config *config; + struct kernel_hwtstamp_config ts = {}; struct ionic *ionic = lif->ionic; - struct hwtstamp_config *config; - struct hwtstamp_config ts; int tx_mode = 0; u64 rx_filt = 0; int err, err2; @@ -99,12 +100,16 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, tx_mode = ionic_hwstamp_tx_mode(config->tx_type); if (tx_mode < 0) { + NL_SET_ERR_MSG_MOD(extack, + "TX time stamping mode isn't supported"); err = tx_mode; goto err_queues; } mask = cpu_to_le64(BIT_ULL(tx_mode)); if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) { + NL_SET_ERR_MSG_MOD(extack, + "TX time stamping mode isn't supported"); err = -ERANGE; goto err_queues; } @@ -124,32 +129,47 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, if (tx_mode) { err = ionic_lif_create_hwstamp_txq(lif); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error creating TX timestamp queue"); goto err_queues; + } } if (rx_filt) { err = ionic_lif_create_hwstamp_rxq(lif); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error creating RX timestamp queue"); goto err_queues; + } } if (tx_mode != lif->phc->ts_config_tx_mode) { err = ionic_lif_set_hwstamp_txmode(lif, tx_mode); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling TX timestamp mode"); goto err_txmode; + } } if (rx_filt != lif->phc->ts_config_rx_filt) { err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling RX timestamp mode"); goto err_rxfilt; + } } if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) { err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling RX timestamp mode"); goto err_rxall; + } } memcpy(&lif->phc->ts_config, config, sizeof(*config)); @@ -183,28 +203,24 @@ err_queues: return err; } -int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct ionic_lif *lif = netdev_priv(netdev); int err; if (!lif->phc || !lif->phc->ptp) return -EOPNOTSUPP; - if 
(copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - mutex_lock(&lif->queue_lock); - err = ionic_lif_hwstamp_set_ts_config(lif, &config); + err = ionic_lif_hwstamp_set_ts_config(lif, config, extack); mutex_unlock(&lif->queue_lock); if (err) { netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); return err; } - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } @@ -216,7 +232,7 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif) return; mutex_lock(&lif->queue_lock); - err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + err = ionic_lif_hwstamp_set_ts_config(lif, NULL, NULL); mutex_unlock(&lif->queue_lock); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); @@ -246,19 +262,18 @@ void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) mutex_unlock(&lif->phc->config_lock); } -int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; + struct ionic_lif *lif = netdev_priv(netdev); if (!lif->phc || !lif->phc->ptp) return -EOPNOTSUPP; mutex_lock(&lif->phc->config_lock); - memcpy(&config, &lif->phc->ts_config, sizeof(config)); + memcpy(config, &lif->phc->ts_config, sizeof(*config)); mutex_unlock(&lif->phc->config_lock); - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; return 0; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d18734fe12e4..0b96b6aa4214 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -57,6 +57,7 @@ #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" #define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw" #define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw" +#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw" #define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" @@ -110,6 +111,7 @@ static const struct rtl_chip_info { { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 }, /* 8125D family. 
*/ + { 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 }, { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 }, { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 }, @@ -770,6 +772,7 @@ MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); MODULE_FIRMWARE(FIRMWARE_8125D_1); MODULE_FIRMWARE(FIRMWARE_8125D_2); +MODULE_FIRMWARE(FIRMWARE_8125K_1); MODULE_FIRMWARE(FIRMWARE_8125BP_2); MODULE_FIRMWARE(FIRMWARE_8126A_2); MODULE_FIRMWARE(FIRMWARE_8126A_3); @@ -4995,9 +4998,7 @@ static int rtl8169_resume(struct device *device) clk_prepare_enable(tp->clk); /* Some chip versions may truncate packets without this initialization */ - if (tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_46) - rtl_init_rxcfg(tp); + rtl_init_rxcfg(tp); return rtl8169_runtime_resume(device); } diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 7b48060c250b..5e56ec9b1013 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -35,16 +35,6 @@ /* Driver's parameters */ #define RAVB_ALIGN 128 -/* Hardware time stamp */ -#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */ -#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */ - -#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */ -#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */ -#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002 -#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006 -#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */ - enum ravb_reg { /* AVB-DMAC registers */ CCC = 0x0000, @@ -1017,7 +1007,6 @@ enum CSR2_BIT { #define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \ CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6) -#define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 @@ -1062,6 +1051,7 @@ struct ravb_hw_info { u32 rx_max_frame_size; u32 rx_buffer_size; u32 rx_desc_size; + u32 dbat_entry_num; unsigned aligned_tx: 1; unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */ @@ -1114,8 +1104,8 @@ struct ravb_private { u32 rx_over_errors; u32 rx_fifo_errors; struct net_device_stats stats[NUM_RX_QUEUE]; - u32 tstamp_tx_ctrl; - u32 tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; struct list_head ts_skb_list; u32 ts_skb_tag; struct ravb_ptp ptp; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index e2d7ce1a85e8..57b0db314fb5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -946,6 +946,30 @@ refill: return rx_packets; } +static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q, + struct ravb_ex_rx_desc *desc, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct timespec64 ts; + bool get_ts; + + if (q == RAVB_NC) + get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; + else + get_ts = priv->tstamp_rx_ctrl == HWTSTAMP_FILTER_ALL; + + if (!get_ts) + return; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + ts.tv_sec = ((u64)le16_to_cpu(desc->ts_sh) << 32) + | le32_to_cpu(desc->ts_sl); + ts.tv_nsec = le32_to_cpu(desc->ts_n); + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); +} + /* Packet receive function for Ethernet AVB */ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) { @@ -955,7 +979,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, 
int q) struct ravb_ex_rx_desc *desc; unsigned int limit, i; struct sk_buff *skb; - struct timespec64 ts; int rx_packets = 0; u8 desc_status; u16 pkt_len; @@ -992,7 +1015,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) if (desc_status & MSC_CEEF) stats->rx_missed_errors++; } else { - u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; struct ravb_rx_buffer *rx_buff; void *rx_addr; @@ -1010,19 +1032,8 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) break; } skb_mark_for_recycle(skb); - get_ts &= (q == RAVB_NC) ? - RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : - ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; - if (get_ts) { - struct skb_shared_hwtstamps *shhwtstamps; - - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << - 32) | le32_to_cpu(desc->ts_sl); - ts.tv_nsec = le32_to_cpu(desc->ts_n); - shhwtstamps->hwtstamp = timespec64_to_ktime(ts); - } + + ravb_rx_rcar_hwstamp(priv, q, desc, skb); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); @@ -1975,7 +1986,6 @@ out_ptp_stop: out_set_reset: ravb_set_opmode(ndev, CCC_OPC_RESET); out_rpm_put: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); out_napi_off: if (info->nc_queues) @@ -2404,95 +2414,55 @@ static int ravb_close(struct net_device *ndev) if (error) return error; - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; } -static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) +static int ravb_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct ravb_private *priv = netdev_priv(ndev); - struct hwtstamp_config config; - config.flags = 0; - config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : - HWTSTAMP_TX_OFF; - switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { - case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RAVB_RXTSTAMP_TYPE_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config.rx_filter = HWTSTAMP_FILTER_NONE; - } + config->flags = 0; + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; + return 0; } /* Control hardware time stamping */ -static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) +static int ravb_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct ravb_private *priv = netdev_priv(ndev); - struct hwtstamp_config config; - u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; - u32 tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; - if (copy_from_user(&config, req->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: - config.rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; } priv->tstamp_tx_ctrl = tstamp_tx_ctrl; priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -/* ioctl to device function */ -static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) -{ - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGHWTSTAMP: - return ravb_hwtstamp_get(ndev, req); - case SIOCSHWTSTAMP: - return ravb_hwtstamp_set(ndev, req); - } - - return phy_mii_ioctl(phydev, req, cmd); + return 0; } static int ravb_change_mtu(struct net_device *ndev, int new_mtu) @@ -2628,11 +2598,13 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_get_stats = ravb_get_stats, .ndo_set_rx_mode = ravb_set_rx_mode, .ndo_tx_timeout = ravb_tx_timeout, - .ndo_eth_ioctl = ravb_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = ravb_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = ravb_set_features, + .ndo_hwtstamp_get = ravb_hwtstamp_get, + .ndo_hwtstamp_set = ravb_hwtstamp_set, }; /* MDIO bus init function */ @@ -2714,6 +2686,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .aligned_tx = 1, .gptp = 1, .nc_queues = 1, @@ -2737,6 +2710,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, @@ -2763,6 +2737,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, @@ -2789,6 +2764,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .multi_irqs = 1, .err_mgmt_irqs = 1, .gptp = 1, 
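[Note: the ravb conversion above moves SIOCSHWTSTAMP parsing out of the driver: with .ndo_hwtstamp_get/.ndo_hwtstamp_set the core performs the user copies and hands the driver a pre-parsed struct kernel_hwtstamp_config. The user-space ABI is unchanged; a minimal sketch of the unchanged user-side request (the "eth0" interface name is a placeholder, and CAP_NET_ADMIN is required) looks like this:]

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Request TX stamping plus RX stamping of PTP L2 event frames,
	 * i.e. the exact cases ravb_hwtstamp_set() handles above. */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		/* The kernel may widen the filter (ravb falls back to
		 * HWTSTAMP_FILTER_ALL) and writes the effective config
		 * back, so re-read what was actually applied. */
		printf("effective rx_filter: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}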
@@ -2814,6 +2790,7 @@ static const struct ravb_hw_info gbeth_hw_info = { .rx_max_frame_size = SZ_8K, .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), + .dbat_entry_num = 2, .aligned_tx = 1, .coalesce_irqs = 1, .tx_counters = 1, @@ -2941,13 +2918,14 @@ static int ravb_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(rstc), "failed to get cpg reset\n"); + info = of_device_get_match_data(&pdev->dev); + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), - NUM_TX_QUEUE, NUM_RX_QUEUE); + info->nc_queues ? NUM_TX_QUEUE : 1, + info->nc_queues ? NUM_RX_QUEUE : 1); if (!ndev) return -ENOMEM; - info = of_device_get_match_data(&pdev->dev); - ndev->features = info->net_features; ndev->hw_features = info->net_hw_features; ndev->vlan_features = info->vlan_features; @@ -3045,7 +3023,7 @@ static int ravb_probe(struct platform_device *pdev) ravb_parse_delay_mode(np, ndev); /* Allocate descriptor base address table */ - priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; + priv->desc_bat_size = sizeof(struct ravb_desc) * info->dbat_entry_num; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, &priv->desc_bat_dma, GFP_KERNEL); if (!priv->desc_bat) { @@ -3055,7 +3033,7 @@ static int ravb_probe(struct platform_device *pdev) error = -ENOMEM; goto out_rpm_put; } - for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) + for (q = RAVB_BE; q < info->dbat_entry_num; q++) priv->desc_bat[q].die_dt = DT_EOS; /* Initialise HW timestamp list */ @@ -3110,7 +3088,6 @@ static int ravb_probe(struct platform_device *pdev) netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -3294,10 +3271,8 @@ static int ravb_resume(struct device *dev) return 0; out_rpm_put: - if (!priv->wol_enabled) { - pm_runtime_mark_last_busy(dev); + if (!priv->wol_enabled) pm_runtime_put_autosuspend(dev); - } return ret; } diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h index f77e79e47357..9a9c232c854e 100644 --- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h +++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h @@ -9,24 +9,11 @@ #include <linux/ptp_clock_kernel.h> -#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000 - -/* driver's definitions */ -#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0) -#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1) -#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2)) -#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL - -#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0) - - struct rcar_gen4_ptp_private { void __iomem *addr; struct ptp_clock *clock; struct ptp_clock_info info; spinlock_t lock; /* For multiple registers access */ - u32 tstamp_tx_ctrl; - u32 tstamp_rx_ctrl; s64 default_addend; bool initialized; }; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index a1d4a877e5bd..aa605304fed0 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -1063,6 +1063,9 @@ struct rswitch_private { bool etha_no_runtime_change; bool gwca_halt; struct net_device *offload_brdev; + + enum hwtstamp_tx_types tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; }; bool is_rdev(const struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c index 8d8acc2124b8..e14b21148f27 100644 --- 
a/drivers/net/ethernet/renesas/rswitch_main.c +++ b/drivers/net/ethernet/renesas/rswitch_main.c @@ -30,6 +30,8 @@ #include "rswitch.h" #include "rswitch_l2.h" +#define RSWITCH_GPTP_OFFSET_S4 0x00018000 + static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) { u32 val; @@ -843,7 +845,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) if (!skb) goto out; - get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + get_ts = rdev->priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; if (get_ts) { struct skb_shared_hwtstamps *shhwtstamps; struct timespec64 ts; @@ -1793,88 +1795,54 @@ static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) return &ndev->stats; } -static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) +static int rswitch_hwstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct rswitch_device *rdev = netdev_priv(ndev); - struct rcar_gen4_ptp_private *ptp_priv; - struct hwtstamp_config config; - - ptp_priv = rdev->priv->ptp_priv; + struct rswitch_private *priv = rdev->priv; - config.flags = 0; - config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : - HWTSTAMP_TX_OFF; - switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { - case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RCAR_GEN4_RXTSTAMP_TYPE_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config.rx_filter = HWTSTAMP_FILTER_NONE; - break; - } + config->flags = 0; + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; + return 0; } -static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) +static int rswitch_hwstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct rswitch_device *rdev = netdev_priv(ndev); - u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; - struct hwtstamp_config config; - u32 tstamp_tx_ctrl; - - if (copy_from_user(&config, req->ifr_data, sizeof(config))) - return -EFAULT; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; - if (config.flags) + if (config->flags) return -EINVAL; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: - config.rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; break; } - rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; - rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; -} - -static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) -{ - if (!netif_running(ndev)) - return -EINVAL; + rdev->priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + rdev->priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - switch (cmd) { - case SIOCGHWTSTAMP: - return rswitch_hwstamp_get(ndev, req); - case SIOCSHWTSTAMP: - return rswitch_hwstamp_set(ndev, req); - default: - return phy_mii_ioctl(ndev->phydev, req, cmd); - } + return 0; } static int rswitch_get_port_parent_id(struct net_device *ndev, @@ -1905,11 +1873,13 @@ static const struct net_device_ops rswitch_netdev_ops = { .ndo_stop = rswitch_stop, .ndo_start_xmit = rswitch_start_xmit, .ndo_get_stats = rswitch_get_stats, - .ndo_eth_ioctl = rswitch_eth_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_get_port_parent_id = rswitch_get_port_parent_id, .ndo_get_phys_port_name = rswitch_get_phys_port_name, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_hwtstamp_get = rswitch_hwstamp_get, + .ndo_hwtstamp_set = rswitch_hwstamp_set, }; bool is_rdev(const struct net_device *ndev) @@ -2190,7 +2160,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) if (IS_ERR(priv->addr)) return PTR_ERR(priv->addr); - priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; + priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); if (ret < 0) { diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 15a043e85431..fdb1e7b7fb06 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -62,6 +62,9 @@ struct rtsn_private { int tx_data_irq; int rx_data_irq; + + u32 tstamp_tx_ctrl; + u32 tstamp_rx_ctrl; }; static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg) @@ -162,8 +165,7 @@ static int rtsn_rx(struct net_device *ndev, int budget) unsigned int i; bool get_ts; - get_ts = priv->ptp_priv->tstamp_rx_ctrl & - RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx; rx_packets = 0; @@ -1122,31 +1124,16 @@ static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) static int rtsn_hwtstamp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config) { - struct rcar_gen4_ptp_private *ptp_priv; struct rtsn_private *priv; if (!netif_running(ndev)) return -ENODEV; priv = netdev_priv(ndev); - ptp_priv = priv->ptp_priv; config->flags = 0; - - config->tx_type = - ptp_priv->tstamp_tx_ctrl ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - - switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { - case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: - config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RCAR_GEN4_RXTSTAMP_TYPE_ALL: - config->rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config->rx_filter = HWTSTAMP_FILTER_NONE; - break; - } + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; return 0; } @@ -1155,26 +1142,22 @@ static int rtsn_hwtstamp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack) { - struct rcar_gen4_ptp_private *ptp_priv; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; struct rtsn_private *priv; - u32 tstamp_rx_ctrl; - u32 tstamp_tx_ctrl; if (!netif_running(ndev)) return -ENODEV; priv = netdev_priv(ndev); - ptp_priv = priv->ptp_priv; if (config->flags) return -EINVAL; switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; @@ -1182,21 +1165,17 @@ static int rtsn_hwtstamp_set(struct net_device *ndev, switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | - RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: config->rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | - RCAR_GEN4_RXTSTAMP_TYPE_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; break; } - ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; - ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + priv->tstamp_rx_ctrl = tstamp_rx_ctrl; return 0; } diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h index 5a09e946a276..577efe66573e 100644 --- a/drivers/net/ethernet/spacemit/k1_emac.h +++ b/drivers/net/ethernet/spacemit/k1_emac.h @@ -363,7 +363,7 @@ struct emac_desc { /* Keep stats in this order, index used for accessing hardware */ union emac_hw_tx_stats { - struct { + struct individual_tx_stats { u64 tx_ok_pkts; u64 tx_total_pkts; u64 tx_ok_bytes; @@ -378,11 +378,11 @@ union emac_hw_tx_stats { u64 tx_pause_pkts; } stats; - DECLARE_FLEX_ARRAY(u64, array); + u64 array[sizeof(struct individual_tx_stats) / sizeof(u64)]; }; union emac_hw_rx_stats { - struct { + struct individual_rx_stats { u64 rx_ok_pkts; u64 rx_total_pkts; u64 rx_crc_err_pkts; @@ -410,7 +410,7 @@ union emac_hw_rx_stats { u64 rx_truncate_fifo_full_pkts; } stats; - DECLARE_FLEX_ARRAY(u64, array); + u64 array[sizeof(struct individual_rx_stats) / sizeof(u64)]; }; #endif /* _K1_EMAC_H_ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 9507131875b2..87c5bea6c2a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -10,6 +10,7 @@ config STMMAC_ETH select PHYLINK select CRC32 select RESET_CONTROLLER + select NET_DEVLINK help This is the driver for the Ethernet IPs built around a Synopsys IP Core. @@ -67,6 +68,15 @@ config DWMAC_ANARION This selects the Anarion SoC glue layer support for the stmmac driver. 
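[Note on the k1_emac.h hunk above: it replaces DECLARE_FLEX_ARRAY() with an array sized from the now-tagged stats struct, so the by-name and by-index views of the union have the same fixed size and cannot drift apart silently. A standalone sketch of that pattern follows; the field names here are invented for illustration, not the driver's real layout:]

#include <stdint.h>
#include <stdio.h>

union tx_stats {
	struct tx_stats_named {		/* hardware register order */
		uint64_t tx_ok_pkts;
		uint64_t tx_total_pkts;
		uint64_t tx_ok_bytes;
	} stats;
	/* Sized from the named struct, mirroring the k1_emac change. */
	uint64_t array[sizeof(struct tx_stats_named) / sizeof(uint64_t)];
};

int main(void)
{
	union tx_stats s = { .stats = { 1, 2, 3 } };

	/* Bulk access by index, e.g. for reading counters in a loop. */
	for (size_t i = 0; i < sizeof(s.array) / sizeof(s.array[0]); i++)
		printf("counter %zu = %llu\n", i,
		       (unsigned long long)s.array[i]);
	return 0;
}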
+config DWMAC_EIC7700
+	tristate "Support for Eswin EIC7700 Ethernet controller"
+	depends on OF && HAS_DMA && ARCH_ESWIN || COMPILE_TEST
+	help
+	  This selects the Eswin EIC7700 SoC glue layer support for the
+	  stmmac driver. The EIC7700 integrates a Synopsys DesignWare
+	  QoS Ethernet controller.
+
 config DWMAC_INGENIC
 	tristate "Ingenic MAC support"
 	default MACH_INGENIC
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 51e068e26ce4..1681a8a28313 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -7,13 +7,14 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
 	      dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
 	      stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
 	      stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
-	      $(stmmac-y)
+	      stmmac_pcs.o $(stmmac-y)
 
 stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
 
 # Ordering matters. Generic driver must be last.
 obj-$(CONFIG_STMMAC_PLATFORM)	+= stmmac-platform.o
 obj-$(CONFIG_DWMAC_ANARION)	+= dwmac-anarion.o
+obj-$(CONFIG_DWMAC_EIC7700)	+= dwmac-eic7700.o
 obj-$(CONFIG_DWMAC_INGENIC)	+= dwmac-ingenic.o
 obj-$(CONFIG_DWMAC_IPQ806X)	+= dwmac-ipq806x.o
 obj-$(CONFIG_DWMAC_LPC18XX)	+= dwmac-lpc18xx.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 8f34c9ad457f..7395bbb94aea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -26,6 +26,9 @@
 #include "hwif.h"
 #include "mmc.h"
 
+#define DWMAC_SNPSVER		GENMASK_U32(7, 0)
+#define DWMAC_USERVER		GENMASK_U32(15, 8)
+
 /* Synopsys Core versions */
 #define DWMAC_CORE_3_40		0x34
 #define DWMAC_CORE_3_50		0x35
@@ -43,6 +46,11 @@
 #define DWXGMAC_ID		0x76
 #define DWXLGMAC_ID		0x27
 
+static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
+{
+	return core_type == DWMAC_CORE_GMAC4 || core_type == DWMAC_CORE_XGMAC;
+}
+
 #define STMMAC_CHAN0	0	/* Always supported and default for all chips */
 
 /* TX and RX Descriptor Length, these need to be power of two.
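[Note: the common.h hunk above adds DWMAC_SNPSVER/DWMAC_USERVER masks for decomposing the MAC version register. The sketch below uses plain-C stand-ins for the kernel's GENMASK_U32()/FIELD_GET() macros; the register value is an arbitrary example, not real hardware:]

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK_U32(h, l). */
#define GENMASK_U32(h, l) \
	(((~UINT32_C(0)) >> (31 - (h))) & ~((UINT32_C(1) << (l)) - 1))

#define DWMAC_SNPSVER	GENMASK_U32(7, 0)	/* as in common.h above */
#define DWMAC_USERVER	GENMASK_U32(15, 8)

/* FIELD_GET() equivalent: divide by the mask's lowest set bit to shift. */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t hwid = 0x00001042;	/* example: user 0x10, Synopsys 0x42 */

	printf("snpsver=0x%02x userver=0x%02x\n",
	       (unsigned int)field_get(DWMAC_SNPSVER, hwid),
	       (unsigned int)field_get(DWMAC_USERVER, hwid));
	return 0;
}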
@@ -192,9 +200,6 @@ struct stmmac_extra_stats {
 	unsigned long irq_pcs_ane_n;
 	unsigned long irq_pcs_link_n;
 	unsigned long irq_rgmii_n;
-	unsigned long pcs_link;
-	unsigned long pcs_duplex;
-	unsigned long pcs_speed;
 	/* debug register */
 	unsigned long mtl_tx_status_fifo_full;
 	unsigned long mtl_tx_fifo_not_empty;
@@ -273,7 +278,6 @@ struct stmmac_safety_stats {
 #define FLOW_AUTO	(FLOW_TX | FLOW_RX)
 
 /* PCS defines */
-#define STMMAC_PCS_RGMII	(1 << 0)
 #define STMMAC_PCS_SGMII	(1 << 1)
 
 #define SF_DMA_MODE 1		/* DMA STORE-AND-FORWARD Operation Mode */
@@ -309,6 +313,16 @@ struct stmmac_safety_stats {
 #define DMA_HW_FEAT_ACTPHYIF	0x70000000	/* Active/selected PHY iface */
 #define DEFAULT_DMA_PBL		8
 
+/* phy_intf_sel_i and ACTPHYIF encodings */
+#define PHY_INTF_SEL_GMII_MII	0
+#define PHY_INTF_SEL_RGMII	1
+#define PHY_INTF_SEL_SGMII	2
+#define PHY_INTF_SEL_TBI	3
+#define PHY_INTF_SEL_RMII	4
+#define PHY_INTF_SEL_RTBI	5
+#define PHY_INTF_SEL_SMII	6
+#define PHY_INTF_SEL_REVMII	7
+
 /* MSI defines */
 #define STMMAC_MSI_VEC_MAX	32
 
@@ -603,13 +617,18 @@ struct mac_device_info {
 	unsigned int mcast_bits_log2;
 	unsigned int rx_csum;
 	unsigned int pcs;
-	unsigned int ps;
 	unsigned int xlgmac;
 	unsigned int num_vlan;
 	u32 vlan_filter[32];
 	bool vlan_fail_q_en;
 	u8 vlan_fail_q;
 	bool hw_vlan_en;
+	bool reverse_sgmii_enable;
+
+	/* This spinlock protects read-modify-write of the interrupt
+	 * mask/enable registers.
+	 */
+	spinlock_t irq_ctrl_lock;
 };
 
 struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index e8539cad4602..c7cd6497d42d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -109,7 +109,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
 	}
 
 	/* dwc-qos needs GMAC4, AAL, TSO and PMT */
-	plat_dat->has_gmac4 = 1;
+	plat_dat->core_type = DWMAC_CORE_GMAC4;
 	plat_dat->dma_cfg->aal = 1;
 	plat_dat->flags |= STMMAC_FLAG_TSO_EN;
 	plat_dat->pmt = 1;
@@ -162,7 +162,7 @@ static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
 		priv = netdev_priv(dev_get_drvdata(eqos->dev));
 
 		/* Calibration should be done with the MDIO bus idle */
-		mutex_lock(&priv->mii->mdio_lock);
+		stmmac_mdio_lock(priv);
 
 		/* calibrate */
 		value = readl(eqos->regs + SDMEMCOMPPADCTRL);
@@ -198,7 +198,7 @@ static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
 		value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
 		writel(value, eqos->regs + SDMEMCOMPPADCTRL);
 
-		mutex_unlock(&priv->mii->mdio_lock);
+		stmmac_mdio_unlock(priv);
 	} else {
 		value = readl(eqos->regs + AUTO_CAL_CONFIG);
 		value &= ~AUTO_CAL_CONFIG_ENABLE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
new file mode 100644
index 000000000000..1dcf2037001e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Eswin DWC Ethernet linux driver
+ *
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ * Authors:
+ *	Zhi Li <lizhi2@eswincomputing.com>
+ *	Shuang Liang <liangshuang@eswincomputing.com>
+ *	Shangjuan Wei <weishangjuan@eswincomputing.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pm_runtime.h>
+#include <linux/stmmac.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#include "stmmac_platform.h"
+
+/* eth_phy_ctrl_offset eth0:0x100 */
+#define EIC7700_ETH_TX_CLK_SEL		BIT(16)
+#define EIC7700_ETH_PHY_INTF_SELI	BIT(0)
+
+/* eth_axi_lp_ctrl_offset eth0:0x108 */
+#define EIC7700_ETH_CSYSREQ_VAL		BIT(0)
+
+/*
+ * TX/RX Clock Delay Bit Masks:
+ * - TX Delay: bits [14:8], TX_CLK delay (unit: 0.1ns per bit)
+ * - RX Delay: bits [30:24], RX_CLK delay (unit: 0.1ns per bit)
+ */
+#define EIC7700_ETH_TX_ADJ_DELAY	GENMASK(14, 8)
+#define EIC7700_ETH_RX_ADJ_DELAY	GENMASK(30, 24)
+
+#define EIC7700_MAX_DELAY_UNIT		0x7F
+
+static const char * const eic7700_clk_names[] = {
+	"tx", "axi", "cfg",
+};
+
+struct eic7700_qos_priv {
+	struct plat_stmmacenet_data *plat_dat;
+};
+
+static int eic7700_clks_config(void *priv, bool enabled)
+{
+	struct eic7700_qos_priv *dwc = (struct eic7700_qos_priv *)priv;
+	struct plat_stmmacenet_data *plat = dwc->plat_dat;
+	int ret = 0;
+
+	if (enabled)
+		ret = clk_bulk_prepare_enable(plat->num_clks, plat->clks);
+	else
+		clk_bulk_disable_unprepare(plat->num_clks, plat->clks);
+
+	return ret;
+}
+
+static int eic7700_dwmac_init(struct platform_device *pdev, void *priv)
+{
+	struct eic7700_qos_priv *dwc = priv;
+
+	return eic7700_clks_config(dwc, true);
+}
+
+static void eic7700_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct eic7700_qos_priv *dwc = priv;
+
+	eic7700_clks_config(dwc, false);
+}
+
+static int eic7700_dwmac_suspend(struct device *dev, void *priv)
+{
+	return pm_runtime_force_suspend(dev);
+}
+
+static int eic7700_dwmac_resume(struct device *dev, void *priv)
+{
+	int ret;
+
+	ret = pm_runtime_force_resume(dev);
+	if (ret)
+		dev_err(dev, "%s failed: %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int eic7700_dwmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct eic7700_qos_priv *dwc_priv;
+	struct regmap *eic7700_hsp_regmap;
+	u32 eth_axi_lp_ctrl_offset;
+	u32 eth_phy_ctrl_offset;
+	u32 eth_phy_ctrl_regset;
+	u32 eth_rxd_dly_offset;
+	u32 eth_dly_param = 0;
+	u32 delay_ps;
+	int i, ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "failed to get resources\n");
+
+	plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+				     "dt configuration failed\n");
+
+	dwc_priv = devm_kzalloc(&pdev->dev, sizeof(*dwc_priv), GFP_KERNEL);
+	if (!dwc_priv)
+		return -ENOMEM;
+
+	/* Read rx-internal-delay-ps and update rx_clk delay */
+	if (!of_property_read_u32(pdev->dev.of_node,
+				  "rx-internal-delay-ps", &delay_ps)) {
+		u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+		eth_dly_param &= ~EIC7700_ETH_RX_ADJ_DELAY;
+		eth_dly_param |= FIELD_PREP(EIC7700_ETH_RX_ADJ_DELAY, val);
+	} else {
+		return dev_err_probe(&pdev->dev, -EINVAL,
+				     "missing required property rx-internal-delay-ps\n");
+	}
+
+	/* Read tx-internal-delay-ps and update tx_clk delay */
+	if (!of_property_read_u32(pdev->dev.of_node,
+				  "tx-internal-delay-ps", &delay_ps)) {
+		u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+		eth_dly_param &= ~EIC7700_ETH_TX_ADJ_DELAY;
+		eth_dly_param |= FIELD_PREP(EIC7700_ETH_TX_ADJ_DELAY, val);
+	} else {
+		return dev_err_probe(&pdev->dev, -EINVAL,
+				     "missing required property tx-internal-delay-ps\n");
+	}
+
+	eic7700_hsp_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							     "eswin,hsp-sp-csr");
+	if (IS_ERR(eic7700_hsp_regmap))
+		return dev_err_probe(&pdev->dev,
+				     PTR_ERR(eic7700_hsp_regmap),
+				     "Failed to get hsp-sp-csr regmap\n");
+
+	ret = of_property_read_u32_index(pdev->dev.of_node,
+					 "eswin,hsp-sp-csr",
+					 1, &eth_phy_ctrl_offset);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "can't get eth_phy_ctrl_offset\n");
+
+	regmap_read(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+		    &eth_phy_ctrl_regset);
+	eth_phy_ctrl_regset |=
+		(EIC7700_ETH_TX_CLK_SEL | EIC7700_ETH_PHY_INTF_SELI);
+	regmap_write(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+		     eth_phy_ctrl_regset);
+
+	ret = of_property_read_u32_index(pdev->dev.of_node,
+					 "eswin,hsp-sp-csr",
+					 2, &eth_axi_lp_ctrl_offset);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "can't get eth_axi_lp_ctrl_offset\n");
+
+	regmap_write(eic7700_hsp_regmap, eth_axi_lp_ctrl_offset,
+		     EIC7700_ETH_CSYSREQ_VAL);
+
+	ret = of_property_read_u32_index(pdev->dev.of_node,
+					 "eswin,hsp-sp-csr",
+					 3, &eth_rxd_dly_offset);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "can't get eth_rxd_dly_offset\n");
+
+	regmap_write(eic7700_hsp_regmap, eth_rxd_dly_offset,
+		     eth_dly_param);
+
+	plat_dat->num_clks = ARRAY_SIZE(eic7700_clk_names);
+	plat_dat->clks = devm_kcalloc(&pdev->dev,
+				      plat_dat->num_clks,
+				      sizeof(*plat_dat->clks),
+				      GFP_KERNEL);
+	if (!plat_dat->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(eic7700_clk_names); i++)
+		plat_dat->clks[i].id = eic7700_clk_names[i];
+
+	ret = devm_clk_bulk_get_optional(&pdev->dev,
+					 plat_dat->num_clks,
+					 plat_dat->clks);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "Failed to get clocks\n");
+
+	plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+	plat_dat->clks_config = eic7700_clks_config;
+	plat_dat->bsp_priv = dwc_priv;
+	dwc_priv->plat_dat = plat_dat;
+	plat_dat->init = eic7700_dwmac_init;
+	plat_dat->exit = eic7700_dwmac_exit;
+	plat_dat->suspend = eic7700_dwmac_suspend;
+	plat_dat->resume = eic7700_dwmac_resume;
+
+	return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id eic7700_dwmac_match[] = {
+	{ .compatible = "eswin,eic7700-qos-eth" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, eic7700_dwmac_match);
+
+static struct platform_driver eic7700_dwmac_driver = {
+	.probe = eic7700_dwmac_probe,
+	.driver = {
+		.name = "eic7700-eth-dwmac",
+		.pm = &stmmac_pltfr_pm_ops,
+		.of_match_table = eic7700_dwmac_match,
+	},
+};
+module_platform_driver(eic7700_dwmac_driver);
+
+MODULE_AUTHOR("Zhi Li <lizhi2@eswincomputing.com>");
+MODULE_AUTHOR("Shuang Liang <liangshuang@eswincomputing.com>");
+MODULE_AUTHOR("Shangjuan Wei <weishangjuan@eswincomputing.com>");
+MODULE_DESCRIPTION("Eswin eic7700 qos ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 4268b9987237..db288fbd5a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -23,18 +23,13 @@
 #include "stmmac_platform.h"
 
 #define GPR_ENET_QOS_INTF_MODE_MASK	GENMASK(21, 16)
-#define GPR_ENET_QOS_INTF_SEL_MII	(0x0 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RMII	(0x4 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RGMII	(0x1 << 16)
+#define
GPR_ENET_QOS_INTF_SEL_MASK GENMASK(20, 16) #define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19) #define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20) #define GPR_ENET_QOS_RGMII_EN (0x1 << 21) #define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0) -#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1) +#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1) #define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0) #define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0) #define MX93_GPR_CLK_SEL_OFFSET (4) @@ -44,13 +39,15 @@ #define RMII_RESET_SPEED (0x3 << 14) #define CTRL_SPEED_MASK GENMASK(15, 14) +struct imx_priv_data; + struct imx_dwmac_ops { u32 addr_width; u32 flags; bool mac_rgmii_txclk_auto_adj; int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr); - int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat); + int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel); void (*fix_mac_speed)(void *priv, int speed, unsigned int mode); }; @@ -67,79 +64,46 @@ struct imx_priv_data { struct plat_stmmacenet_data *plat_dat; }; -static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +static int imx8mp_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - struct imx_priv_data *dwmac = plat_dat->bsp_priv; - int val; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - val = GPR_ENET_QOS_INTF_SEL_MII; - break; - case PHY_INTERFACE_MODE_RMII: - val = GPR_ENET_QOS_INTF_SEL_RMII; - val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL); - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - val = GPR_ENET_QOS_INTF_SEL_RGMII | - GPR_ENET_QOS_RGMII_EN; - break; - default: - pr_debug("imx dwmac doesn't support %s interface\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } + unsigned int val; + + val = FIELD_PREP(GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) | + GPR_ENET_QOS_CLK_GEN_EN; + + if (phy_intf_sel == PHY_INTF_SEL_RMII && !dwmac->rmii_refclk_ext) + val |= GPR_ENET_QOS_CLK_TX_CLK_SEL; + else if (phy_intf_sel == PHY_INTF_SEL_RGMII) + val |= GPR_ENET_QOS_RGMII_EN; - val |= GPR_ENET_QOS_CLK_GEN_EN; return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, GPR_ENET_QOS_INTF_MODE_MASK, val); }; static int -imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +imx8dxl_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - int ret = 0; - /* TBD: depends on imx8dxl scu interfaces to be upstreamed */ - return ret; + return 0; } -static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - struct imx_priv_data *dwmac = plat_dat->bsp_priv; - int val, ret; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - val = MX93_GPR_ENET_QOS_INTF_SEL_MII; - break; - case PHY_INTERFACE_MODE_RMII: - if (dwmac->rmii_refclk_ext) { - ret = regmap_clear_bits(dwmac->intf_regmap, - dwmac->intf_reg_off + - MX93_GPR_CLK_SEL_OFFSET, - MX93_GPR_ENET_QOS_CLK_SEL_MASK); - if (ret) - return ret; - } - val = MX93_GPR_ENET_QOS_INTF_SEL_RMII; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII; - break; - default: - dev_dbg(dwmac->dev, "imx dwmac 
doesn't support %s interface\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; + unsigned int val; + int ret; + + if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) { + ret = regmap_clear_bits(dwmac->intf_regmap, + dwmac->intf_reg_off + + MX93_GPR_CLK_SEL_OFFSET, + MX93_GPR_ENET_QOS_CLK_SEL_MASK); + if (ret) + return ret; } - val |= MX93_GPR_ENET_QOS_CLK_GEN_EN; + val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) | + MX93_GPR_ENET_QOS_CLK_GEN_EN; + return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, MX93_GPR_ENET_QOS_INTF_MODE_MASK, val); }; @@ -170,34 +134,24 @@ static int imx_dwmac_clks_config(void *priv, bool enabled) return ret; } -static int imx_dwmac_init(struct platform_device *pdev, void *priv) +static int imx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) { - struct plat_stmmacenet_data *plat_dat; - struct imx_priv_data *dwmac = priv; - int ret; - - plat_dat = dwmac->plat_dat; + struct imx_priv_data *dwmac = bsp_priv; - if (dwmac->ops->set_intf_mode) { - ret = dwmac->ops->set_intf_mode(plat_dat); - if (ret) - return ret; - } + if (!dwmac->ops->set_intf_mode) + return 0; - return 0; -} + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) + return -EINVAL; -static void imx_dwmac_exit(struct platform_device *pdev, void *priv) -{ - /* nothing to do now */ + return dwmac->ops->set_intf_mode(dwmac, phy_intf_sel); } static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, phy_interface_t interface, int speed) { - struct imx_priv_data *dwmac = bsp_priv; - - interface = dwmac->plat_dat->phy_interface; if (interface == PHY_INTERFACE_MODE_RMII || interface == PHY_INTERFACE_MODE_MII) return 0; @@ -244,8 +198,8 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode) if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface)) return; - iface &= MX93_GPR_ENET_QOS_INTF_MASK; - if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII) + if (FIELD_GET(MX93_GPR_ENET_QOS_INTF_SEL_MASK, iface) != + PHY_INTF_SEL_RGMII) return; old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG); @@ -258,6 +212,7 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode) readl(dwmac->base_addr + MAC_CTRL_REG); usleep_range(10, 20); + iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK; iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN; regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface); @@ -370,8 +325,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->tx_queues_cfg[i].tbs_en = 1; plat_dat->host_dma_width = dwmac->ops->addr_width; - plat_dat->init = imx_dwmac_init; - plat_dat->exit = imx_dwmac_exit; + plat_dat->set_phy_intf_sel = imx_set_phy_intf_sel; plat_dat->clks_config = imx_dwmac_clks_config; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index c1670f6bae14..8e4a30c11db0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -35,10 +35,6 @@ #define MACPHYC_RX_DELAY_MASK GENMASK(10, 4) #define MACPHYC_SOFT_RST_MASK GENMASK(3, 3) #define MACPHYC_PHY_INFT_MASK GENMASK(2, 0) -#define MACPHYC_PHY_INFT_RMII 0x4 -#define MACPHYC_PHY_INFT_RGMII 0x1 -#define MACPHYC_PHY_INFT_GMII 0x0 -#define MACPHYC_PHY_INFT_MII 0x0 #define MACPHYC_TX_DELAY_PS_MAX 2496 #define MACPHYC_TX_DELAY_PS_MIN 20 @@ -68,172 
+64,93 @@ struct ingenic_soc_info { enum ingenic_mac_version version; u32 mask; - int (*set_mode)(struct plat_stmmacenet_data *plat_dat); -}; - -static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv) -{ - struct ingenic_mac *mac = bsp_priv; - int ret; + int (*set_mode)(struct ingenic_mac *mac, u8 phy_intf_sel); - if (mac->soc_info->set_mode) { - ret = mac->soc_info->set_mode(mac->plat_dat); - if (ret) - return ret; - } - - return 0; -} + u8 valid_phy_intf_sel; +}; -static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat) +static int jz4775_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel) { - struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n"); - break; - - case PHY_INTERFACE_MODE_GMII: - val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n"); - break; - - case PHY_INTERFACE_MODE_RMII: - val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n"); - break; - - default: - dev_err(mac->dev, "Unsupported interface %s\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } + val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel) | + FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT); /* Update MAC PHY control register */ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val); } -static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat) +static int x1000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel) { - struct ingenic_mac *mac = plat_dat->bsp_priv; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); - break; - - default: - dev_err(mac->dev, "Unsupported interface %s\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } - /* Update MAC PHY control register */ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0); } -static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat) +static int x1600_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel) { - struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); - break; - - default: - dev_err(mac->dev, "Unsupported interface %s\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } + val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel); /* Update MAC PHY control register */ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val); } -static int x1830_mac_set_mode(struct 
plat_stmmacenet_data *plat_dat) +static int x1830_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel) { - struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); - break; - - default: - dev_err(mac->dev, "Unsupported interface %s\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } + val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) | + FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel); /* Update MAC PHY control register */ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val); } -static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat) +static int x2000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel) { - struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) | - FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) | - FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII); + val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel); + if (phy_intf_sel == PHY_INTF_SEL_RMII) { + val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) | + FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN); + } else if (phy_intf_sel == PHY_INTF_SEL_RGMII) { if (mac->tx_delay == 0) val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN); else val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) | - FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1); + FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1); if (mac->rx_delay == 0) val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN); else val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) | FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1); - - dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n"); - break; - - default: - dev_err(mac->dev, "Unsupported interface %s\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; } /* Update MAC PHY control register */ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val); } +static int ingenic_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) +{ + struct ingenic_mac *mac = bsp_priv; + + if (!mac->soc_info->set_mode) + return 0; + + if (phy_intf_sel >= BITS_PER_BYTE || + ~mac->soc_info->valid_phy_intf_sel & BIT(phy_intf_sel)) + return -EINVAL; + + dev_dbg(mac->dev, "MAC PHY control register: interface %s\n", + phy_modes(mac->plat_dat->phy_interface)); + + return mac->soc_info->set_mode(mac, phy_intf_sel); +} + static int ingenic_mac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -293,7 +210,7 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->plat_dat = plat_dat; plat_dat->bsp_priv = mac; - plat_dat->init = ingenic_mac_init; + plat_dat->set_phy_intf_sel = ingenic_set_phy_intf_sel; return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } @@ -303,6 +220,9 @@ static struct 
ingenic_soc_info jz4775_soc_info = { .mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK, .set_mode = jz4775_mac_set_mode, + .valid_phy_intf_sel = BIT(PHY_INTF_SEL_GMII_MII) | + BIT(PHY_INTF_SEL_RGMII) | + BIT(PHY_INTF_SEL_RMII), }; static struct ingenic_soc_info x1000_soc_info = { @@ -310,6 +230,7 @@ static struct ingenic_soc_info x1000_soc_info = { .mask = MACPHYC_SOFT_RST_MASK, .set_mode = x1000_mac_set_mode, + .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII), }; static struct ingenic_soc_info x1600_soc_info = { @@ -317,6 +238,7 @@ static struct ingenic_soc_info x1600_soc_info = { .mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK, .set_mode = x1600_mac_set_mode, + .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII), }; static struct ingenic_soc_info x1830_soc_info = { @@ -324,6 +246,7 @@ static struct ingenic_soc_info x1830_soc_info = { .mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK, .set_mode = x1830_mac_set_mode, + .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII), }; static struct ingenic_soc_info x2000_soc_info = { @@ -332,6 +255,8 @@ static struct ingenic_soc_info x2000_soc_info = { MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK, .set_mode = x2000_mac_set_mode, + .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RGMII) | + BIT(PHY_INTF_SEL_RMII), }; static const struct of_device_id ingenic_mac_of_matches[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index e74d00984b88..b2194e414ec1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -565,7 +565,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat) { /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->clk_csr = STMMAC_CSR_20_35M; - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->force_sf_dma_mode = 1; plat->mdio_bus_data->needs_reset = true; @@ -612,8 +612,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->pdev = pdev; plat->phy_addr = -1; plat->clk_csr = STMMAC_CSR_250_300M; - plat->has_gmac = 0; - plat->has_gmac4 = 1; + plat->core_type = DWMAC_CORE_GMAC4; plat->force_sf_dma_mode = 0; plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index ca4035cbb55b..c05f85534f0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -473,7 +473,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) return err; } - plat_dat->has_gmac = true; + plat_dat->core_type = DWMAC_CORE_GMAC; plat_dat->bsp_priv = gmac; plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate; plat_dat->multicast_filter_bins = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 592aa9d636e5..dd2fc39ec3e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -92,7 +92,7 @@ static void loongson_default_data(struct pci_dev *pdev, /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->clk_csr = STMMAC_CSR_20_35M; - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->force_sf_dma_mode = 1; /* Set default value for multicast hash bins */ @@ -320,10 +320,9 @@ static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv, return ret; } -static struct 
mac_device_info *loongson_dwmac_setup(void *apriv) +static int loongson_dwmac_setup(void *apriv, struct mac_device_info *mac) { struct stmmac_priv *priv = apriv; - struct mac_device_info *mac; struct stmmac_dma_ops *dma; struct loongson_data *ld; struct pci_dev *pdev; @@ -331,13 +330,9 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv) ld = priv->plat->bsp_priv; pdev = to_pci_dev(priv->device); - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); - if (!mac) - return NULL; - dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL); if (!dma) - return NULL; + return -ENOMEM; /* The Loongson GMAC and GNET devices are based on the DW GMAC * v3.50a and v3.73a IP-cores. But the HW designers have changed @@ -396,7 +391,7 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv) mac->mii.clk_csr_shift = 2; mac->mii.clk_csr_mask = GENMASK(5, 2); - return mac; + return 0; } static int loongson_dwmac_msi_config(struct pci_dev *pdev, @@ -598,7 +593,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id goto err_disable_device; plat->bsp_priv = ld; - plat->setup = loongson_dwmac_setup; + plat->mac_setup = loongson_dwmac_setup; plat->fix_soc_reset = loongson_dwmac_fix_reset; plat->suspend = loongson_dwmac_suspend; plat->resume = loongson_dwmac_resume; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c index 32b5d1492e2e..894ee66f5c9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c @@ -38,8 +38,6 @@ #define GMAC_SHUT BIT(6) #define PHY_INTF_SELI GENMASK(30, 28) -#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0) -#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4) struct ls1x_dwmac { struct plat_stmmacenet_data *plat_dat; @@ -140,22 +138,18 @@ static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv) struct ls1x_dwmac *dwmac = priv; struct plat_stmmacenet_data *plat = dwmac->plat_dat; struct regmap *regmap = dwmac->regmap; + int phy_intf_sel; - switch (plat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, - PHY_INTF_MII); - break; - case PHY_INTERFACE_MODE_RMII: - regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, - PHY_INTF_RMII); - break; - default: + phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(&pdev->dev, "Unsupported PHY-mode %u\n", plat->phy_interface); return -EOPNOTSUPP; } + regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, + FIELD_PREP(PHY_INTF_SELI, phy_intf_sel)); regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 2562a6d036a2..c68d7de1f8ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -21,16 +21,29 @@ /* Register defines for CREG syscon */ #define LPC18XX_CREG_CREG6 0x12c -# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7 -# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0 -# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4 +# define LPC18XX_CREG_CREG6_ETHMODE_MASK GENMASK(2, 0) + +static int lpc18xx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) +{ + struct regmap *reg = bsp_priv; + + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RMII) + return 
-EINVAL; + + regmap_update_bits(reg, LPC18XX_CREG_CREG6, + LPC18XX_CREG_CREG6_ETHMODE_MASK, + FIELD_PREP(LPC18XX_CREG_CREG6_ETHMODE_MASK, + phy_intf_sel)); + + return 0; +} static int lpc18xx_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - struct regmap *reg; - u8 ethmode; + struct regmap *regmap; int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -41,25 +54,16 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - plat_dat->has_gmac = true; + plat_dat->core_type = DWMAC_CORE_GMAC; - reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); - if (IS_ERR(reg)) { + regmap = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); + if (IS_ERR(regmap)) { dev_err(&pdev->dev, "syscon lookup failed\n"); - return PTR_ERR(reg); - } - - if (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII) { - ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII; - } else if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) { - ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; - } else { - dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); - return -EINVAL; + return PTR_ERR(regmap); } - regmap_update_bits(reg, LPC18XX_CREG_CREG6, - LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); + plat_dat->bsp_priv = regmap; + plat_dat->set_phy_intf_sel = lpc18xx_set_phy_intf_sel; return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index f1b36f0a401d..1f2d7d19ca56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -17,9 +17,6 @@ /* Peri Configuration register for mt2712 */ #define PERI_ETH_PHY_INTF_SEL 0x418 -#define PHY_INTF_MII 0 -#define PHY_INTF_RGMII 1 -#define PHY_INTF_RMII 4 #define RMII_CLK_SRC_RXC BIT(4) #define RMII_CLK_SRC_INTERNAL BIT(5) @@ -88,7 +85,8 @@ struct mediatek_dwmac_plat_data { }; struct mediatek_dwmac_variant { - int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat); + int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat, + u8 phy_intf_sel); int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat); /* clock ids to be requested */ @@ -109,29 +107,16 @@ static const char * const mt8195_dwmac_clk_l[] = { "axi", "apb", "mac_cg", "mac_main", "ptp_ref" }; -static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat) +static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat, + u8 phy_intf_sel) { - int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0; - int rmii_rxc = plat->rmii_rxc ? 
RMII_CLK_SRC_RXC : 0; - u32 intf_val = 0; + u32 intf_val = phy_intf_sel; - /* select phy interface in top control domain */ - switch (plat->phy_mode) { - case PHY_INTERFACE_MODE_MII: - intf_val |= PHY_INTF_MII; - break; - case PHY_INTERFACE_MODE_RMII: - intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac); - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: - intf_val |= PHY_INTF_RGMII; - break; - default: - dev_err(plat->dev, "phy interface not supported\n"); - return -EINVAL; + if (phy_intf_sel == PHY_INTF_SEL_RMII) { + if (plat->rmii_clk_from_mac) + intf_val |= RMII_CLK_SRC_INTERNAL; + if (plat->rmii_rxc) + intf_val |= RMII_CLK_SRC_RXC; } regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val); @@ -288,30 +273,16 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = { .tx_delay_max = 17600, }; -static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat) +static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat, + u8 phy_intf_sel) { - int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0; - int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0; - u32 intf_val = 0; + u32 intf_val = FIELD_PREP(MT8195_ETH_INTF_SEL, phy_intf_sel); - /* select phy interface in top control domain */ - switch (plat->phy_mode) { - case PHY_INTERFACE_MODE_MII: - intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII); - break; - case PHY_INTERFACE_MODE_RMII: - intf_val |= (rmii_rxc | rmii_clk_from_mac); - intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII); - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: - intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII); - break; - default: - dev_err(plat->dev, "phy interface not supported\n"); - return -EINVAL; + if (phy_intf_sel == PHY_INTF_SEL_RMII) { + if (plat->rmii_clk_from_mac) + intf_val |= MT8195_RMII_CLK_SRC_INTERNAL; + if (plat->rmii_rxc) + intf_val |= MT8195_RMII_CLK_SRC_RXC; } /* MT8195 only support external PHY */ @@ -527,10 +498,18 @@ static int mediatek_dwmac_init(struct device *dev, void *priv) { struct mediatek_dwmac_plat_data *plat = priv; const struct mediatek_dwmac_variant *variant = plat->variant; - int ret; + int phy_intf_sel, ret; if (variant->dwmac_set_phy_interface) { - ret = variant->dwmac_set_phy_interface(plat); + phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_mode); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { + dev_err(plat->dev, "phy interface not supported\n"); + return phy_intf_sel < 0 ? 
phy_intf_sel : -EINVAL; + } + + ret = variant->dwmac_set_phy_interface(plat, phy_intf_sel); if (ret) { dev_err(dev, "failed to set phy interface, err = %d\n", ret); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index a50782994b97..e4d5c41294f4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -26,8 +26,6 @@ #define PRG_ETH0_RGMII_MODE BIT(0) #define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0) -#define PRG_ETH0_EXT_RGMII_MODE 1 -#define PRG_ETH0_EXT_RMII_MODE 4 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) @@ -238,28 +236,20 @@ static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac) static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac) { - switch (dwmac->phy_mode) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - /* enable RGMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, - PRG_ETH0_EXT_PHY_MODE_MASK, - PRG_ETH0_EXT_RGMII_MODE); - break; - case PHY_INTERFACE_MODE_RMII: - /* disable RGMII mode -> enables RMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, - PRG_ETH0_EXT_PHY_MODE_MASK, - PRG_ETH0_EXT_RMII_MODE); - break; - default: + int phy_intf_sel; + + phy_intf_sel = stmmac_get_phy_intf_sel(dwmac->phy_mode); + if (phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(dwmac->dev, "fail to set phy-mode %s\n", phy_modes(dwmac->phy_mode)); - return -EINVAL; + return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL; } + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_EXT_PHY_MODE_MASK, + FIELD_PREP(PRG_ETH0_EXT_PHY_MODE_MASK, + phy_intf_sel)); + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index d8fd4d8f6ced..1a616a71c36a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -76,10 +76,6 @@ #define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6) #define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5) -/* MAC_CTRL_REG bits */ -#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) -#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) - /* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ #define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) @@ -96,7 +92,6 @@ struct ethqos_emac_driver_data { bool rgmii_config_loopback_en; bool has_emac_ge_3; const char *link_clk_name; - bool has_integrated_pcs; u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; bool needs_sgmii_loopback; @@ -282,7 +277,6 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .rgmii_config_loopback_en = false, .has_emac_ge_3 = true, .link_clk_name = "phyaux", - .has_integrated_pcs = true, .needs_sgmii_loopback = true, .dma_addr_width = 36, .dwmac4_addrs = { @@ -624,7 +618,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed) static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable) { - stmmac_pcs_ctrl_ane(priv, enable, 0, 0); + stmmac_pcs_ctrl_ane(priv, enable, 0); } /* On interface toggle MAC registers gets reset. 
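
The mediatek and meson8b hunks above, and the starfive, visconti and stm32 conversions further down, all collapse the same boilerplate: the per-driver switch over phy_interface_t with private selector #defines becomes a call to the new stmmac_get_phy_intf_sel() helper, a check that the SoC can actually route the returned selector, and a single masked register write. A minimal sketch of the resulting pattern, assuming a fictitious foo glue driver — the FOO_* register, mask and function are invented for illustration, while stmmac_get_phy_intf_sel() and the PHY_INTF_SEL_* encoding are what this series introduces:

#define FOO_PHY_SEL_REG		0x10		/* invented for this sketch */
#define FOO_PHY_SEL_MASK	GENMASK(2, 0)	/* invented for this sketch */

static int foo_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
	struct regmap *regmap = plat_dat->bsp_priv;
	int phy_intf_sel;

	/* Map phy_interface_t to the DW MAC phy_intf_sel encoding */
	phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
	if (phy_intf_sel != PHY_INTF_SEL_RGMII &&
	    phy_intf_sel != PHY_INTF_SEL_RMII)
		return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;

	/* Program the SoC-specific syscon field with the selector */
	return regmap_update_bits(regmap, FOO_PHY_SEL_REG, FOO_PHY_SEL_MASK,
				  FIELD_PREP(FOO_PHY_SEL_MASK, phy_intf_sel));
}

The error tail mirrors the hunks above: the helper returns a negative errno for interface modes it does not know, while a known selector that this particular SoC cannot route still fails with -EINVAL.
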
@@ -634,13 +628,9 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) { struct net_device *dev = platform_get_drvdata(ethqos->pdev); struct stmmac_priv *priv = netdev_priv(dev); - int val; - - val = readl(ethqos->mac_base + MAC_CTRL_REG); switch (speed) { case SPEED_2500: - val &= ~ETHQOS_MAC_CTRL_PORT_SEL; rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); @@ -648,7 +638,6 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) ethqos_pcs_set_inband(priv, false); break; case SPEED_1000: - val &= ~ETHQOS_MAC_CTRL_PORT_SEL; rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); @@ -656,13 +645,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) ethqos_pcs_set_inband(priv, true); break; case SPEED_100: - val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE; ethqos_set_serdes_speed(ethqos, SPEED_1000); ethqos_pcs_set_inband(priv, true); break; case SPEED_10: - val |= ETHQOS_MAC_CTRL_PORT_SEL; - val &= ~ETHQOS_MAC_CTRL_SPEED_MODE; rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR, FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, SGMII_10M_RX_CLK_DVDR), @@ -672,9 +658,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) break; } - writel(val, ethqos->mac_base + MAC_CTRL_REG); - - return val; + return 0; } static int ethqos_configure(struct qcom_ethqos *ethqos, int speed) @@ -848,7 +832,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = ethqos_fix_mac_speed; plat_dat->dump_debug_regs = rgmii_dump; plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config; - plat_dat->has_gmac4 = 1; + plat_dat->core_type = DWMAC_CORE_GMAC4; if (ethqos->has_emac_ge_3) plat_dat->dwmac4_addrs = &data->dwmac4_addrs; plat_dat->pmt = 1; @@ -856,8 +840,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_TSO_EN; if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI; - if (data->has_integrated_pcs) - plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS; if (data->dma_addr_width) plat_dat->host_dma_width = data->dma_addr_width; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 0786816e05f0..a5c7e03ebc63 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -827,6 +827,69 @@ static const struct rk_gmac_ops rk3399_ops = { .set_speed = rk3399_set_speed, }; +#define RK3506_GRF_SOC_CON8 0x0020 +#define RK3506_GRF_SOC_CON11 0x002c + +#define RK3506_GMAC_RMII_MODE GRF_BIT(1) + +#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3) +#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3) + +#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5) +#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5) + +#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2) +#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2) + +static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + unsigned int id = bsp_priv->id, offset; + + offset = (id == 1) ? 
RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE); +} + +static const struct rk_reg_speed_data rk3506_reg_speed_data = { + .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20, + .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2, +}; + +static int rk3506_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + unsigned int id = bsp_priv->id, offset; + + offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data, + offset, interface, speed); +} + +static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv, + bool input, bool enable) +{ + unsigned int value, offset, id = bsp_priv->id; + + offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + + value = input ? RK3506_GMAC_CLK_SELECT_IO : + RK3506_GMAC_CLK_SELECT_CRU; + value |= enable ? RK3506_GMAC_CLK_RMII_NOGATE : + RK3506_GMAC_CLK_RMII_GATE; + regmap_write(bsp_priv->grf, offset, value); +} + +static const struct rk_gmac_ops rk3506_ops = { + .set_to_rmii = rk3506_set_to_rmii, + .set_speed = rk3506_set_speed, + .set_clock_selection = rk3506_set_clock_selection, + .regs_valid = true, + .regs = { + 0xff4c8000, /* gmac0 */ + 0xff4d0000, /* gmac1 */ + 0x0, /* sentinel */ + }, +}; + #define RK3528_VO_GRF_GMAC_CON 0x0018 #define RK3528_VO_GRF_MACPHY_CON0 0x001c #define RK3528_VO_GRF_MACPHY_CON1 0x0020 @@ -1751,8 +1814,8 @@ static int rk_gmac_probe(struct platform_device *pdev) /* If the stmmac is not already selected as gmac4, * then make sure we fallback to gmac. */ - if (!plat_dat->has_gmac4) { - plat_dat->has_gmac = true; + if (plat_dat->core_type != DWMAC_CORE_GMAC4) { + plat_dat->core_type = DWMAC_CORE_GMAC; plat_dat->rx_fifo_size = 4096; plat_dat->tx_fifo_size = 2048; } @@ -1809,6 +1872,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, + { .compatible = "rockchip,rk3506-gmac", .data = &rk3506_ops }, { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops }, { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops }, { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c index 221539d760bc..2b7ad64bfdf7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -24,10 +24,10 @@ #define GMAC_INTF_RATE_125M 125000000 /* 125MHz */ /* SoC PHY interface control register */ -#define PHY_INTF_SEL_MII 0x00 -#define PHY_INTF_SEL_SGMII 0x01 -#define PHY_INTF_SEL_RGMII 0x02 -#define PHY_INTF_SEL_RMII 0x08 +#define S32_PHY_INTF_SEL_MII 0x00 +#define S32_PHY_INTF_SEL_SGMII 0x01 +#define S32_PHY_INTF_SEL_RGMII 0x02 +#define S32_PHY_INTF_SEL_RMII 0x08 struct s32_priv_data { void __iomem *ioaddr; @@ -40,7 +40,7 @@ struct s32_priv_data { static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac) { - writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts); + writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts); dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode)); @@ -146,7 +146,7 @@ static int s32_dwmac_probe(struct platform_device *pdev) gmac->ioaddr = res.addr; /* S32CC core feature set */ - plat->has_gmac4 = true; + plat->core_type = DWMAC_CORE_GMAC4; plat->pmt = 1; plat->flags |= STMMAC_FLAG_SPH_DISABLE; 
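	/*
	 * core_type replaces the former has_gmac/has_gmac4/has_xgmac
	 * booleans throughout this series: a platform now names exactly
	 * one core variant (DWMAC_CORE_MAC100, _GMAC, _GMAC4 or _XGMAC),
	 * and hwif.c derives the version register, the PTP/MMC offsets
	 * and the hw-interface table entry from that single value.
	 */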
plat->rx_fifo_size = 20480; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 354f01184e6c..49d651948e2b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -5,6 +5,7 @@ */ #include <linux/mfd/altera-sysmgr.h> +#include <linux/clocksource_ids.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_net.h> @@ -15,8 +16,10 @@ #include <linux/reset.h> #include <linux/stmmac.h> +#include "dwxgmac2.h" #include "stmmac.h" #include "stmmac_platform.h" +#include "stmmac_ptp.h" #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 @@ -41,9 +44,17 @@ #define SGMII_ADAPTER_ENABLE 0x0000 #define SGMII_ADAPTER_DISABLE 0x0001 +#define SMTG_MDIO_ADDR 0x15 +#define SMTG_TSC_WORD0 0xC +#define SMTG_TSC_WORD1 0xD +#define SMTG_TSC_WORD2 0xE +#define SMTG_TSC_WORD3 0xF +#define SMTG_TSC_SHIFT 16 + struct socfpga_dwmac; struct socfpga_dwmac_ops { int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv); + void (*setup_plat_dat)(struct socfpga_dwmac *dwmac_priv); }; struct socfpga_dwmac { @@ -268,6 +279,112 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) return 0; } +static void get_smtgtime(struct mii_bus *mii, int smtg_addr, u64 *smtg_time) +{ + u64 ns; + + ns = mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD3); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD2); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD1); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD0); + + *smtg_time = ns; +} + +static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + u32 num_snapshot, gpio_value, acr_value; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 smtg_time = 0; + u64 ptp_time = 0; + int i, ret; + u32 v; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal. Create a rising edge by just toggle + * the GPO0 to low and back to high. 
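	 * GPO0 is the XGMAC general-purpose output bit (XGMAC_GPIO_GPO0 in
	 * XGMAC_GPIO_STATUS, both defined by this patch in dwxgmac2.h); the
	 * snapshot FIFO was flushed just above, so the snapshots drained
	 * from XGMAC_TIMESTAMP_STATUS below should all stem from this
	 * trigger.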
+ */ + gpio_value = readl(ioaddr + XGMAC_GPIO_STATUS); + gpio_value &= ~XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + gpio_value |= XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + XGMAC_INT_STATUS, v, + (v & XGMAC_INT_TSIS), 100, 10000); + if (ret) { + netdev_err(priv->dev, "%s: Wait for time sync operation timeout\n", + __func__); + return ret; + } + + *system = (struct system_counterval_t) { + .cycles = 0, + .cs_id = CSID_ARM_ARCH_COUNTER, + .use_nsecs = false, + }; + + num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) & + XGMAC_TIMESTAMP_ATSNS_MASK) >> + XGMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + } + + get_smtgtime(priv->mii, SMTG_MDIO_ADDR, &smtg_time); + system->cycles = smtg_time; + + return 0; +} + static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; @@ -441,6 +558,43 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv) return dwmac->ops->set_phy_mode(dwmac); } +static void socfpga_gen5_setup_plat_dat(struct socfpga_dwmac *dwmac) +{ + struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat; + + plat_dat->core_type = DWMAC_CORE_GMAC; + + /* Rx watchdog timer in dwmac is buggy in this hw */ + plat_dat->riwt_off = 1; +} + +static void socfpga_agilex5_setup_plat_dat(struct socfpga_dwmac *dwmac) +{ + struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat; + + plat_dat->core_type = DWMAC_CORE_XGMAC; + + /* Enable TSO */ + plat_dat->flags |= STMMAC_FLAG_TSO_EN; + + /* Enable TBS */ + switch (plat_dat->tx_queues_to_use) { + case 8: + plat_dat->tx_queues_cfg[7].tbs_en = true; + fallthrough; + case 7: + plat_dat->tx_queues_cfg[6].tbs_en = true; + break; + default: + /* Tx Queues 0 - 5 doesn't support TBS on Agilex5 */ + break; + } + + /* Hw supported cross-timestamp */ + plat_dat->int_snapshot_num = AUX_SNAPSHOT0; + plat_dat->crosststamp = smtg_crosststamp; +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -497,25 +651,31 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; plat_dat->select_pcs = socfpga_dwmac_select_pcs; - plat_dat->has_gmac = true; - plat_dat->riwt_off = 1; + ops->setup_plat_dat(dwmac); return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, + .setup_plat_dat = socfpga_gen5_setup_plat_dat, }; static const struct socfpga_dwmac_ops socfpga_gen10_ops = { .set_phy_mode = socfpga_gen10_set_phy_mode, + .setup_plat_dat = socfpga_gen5_setup_plat_dat, +}; + +static const struct socfpga_dwmac_ops socfpga_agilex5_ops = { + .set_phy_mode = socfpga_gen10_set_phy_mode, + .setup_plat_dat = socfpga_agilex5_setup_plat_dat, }; static const struct of_device_id socfpga_dwmac_match[] = { { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops }, { .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops }, - { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_gen10_ops }, + { .compatible = 
"altr,socfpga-stmmac-agilex5", .data = &socfpga_agilex5_ops }, { } }; MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 6938dd2a79b7..16b955a6d77b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -15,8 +15,6 @@ #include "stmmac_platform.h" -#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1 -#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4 #define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U #define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8 @@ -35,25 +33,15 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) struct starfive_dwmac *dwmac = plat_dat->bsp_priv; struct regmap *regmap; unsigned int args[2]; - unsigned int mode; + int phy_intf_sel; int err; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - mode = STARFIVE_DWMAC_PHY_INFT_RMII; - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - mode = STARFIVE_DWMAC_PHY_INFT_RGMII; - break; - - default: + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(dwmac->dev, "unsupported interface %s\n", phy_modes(plat_dat->phy_interface)); - return -EINVAL; + return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL; } regmap = syscon_regmap_lookup_by_phandle_args(dwmac->dev->of_node, @@ -65,7 +53,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) /* args[0]:offset args[1]: shift */ err = regmap_update_bits(regmap, args[0], STARFIVE_DWMAC_PHY_INFT_FIELD << args[1], - mode << args[1]); + phy_intf_sel << args[1]); if (err) return dev_err_probe(dwmac->dev, err, "error setting phy mode\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 53d5ce1f6dc6..b0509ab6b31c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -77,13 +77,9 @@ * 001-RGMII * 010-SGMII * 100-RMII + * These are the DW MAC phy_intf_sel values. 
*/ #define MII_PHY_SEL_MASK GENMASK(4, 2) -#define ETH_PHY_SEL_RMII BIT(4) -#define ETH_PHY_SEL_SGMII BIT(3) -#define ETH_PHY_SEL_RGMII BIT(2) -#define ETH_PHY_SEL_GMII 0x0 -#define ETH_PHY_SEL_MII 0x0 struct sti_dwmac { phy_interface_t interface; /* MII interface */ @@ -102,15 +98,6 @@ struct sti_dwmac_of_data { void (*fix_retime_src)(void *priv, int speed, unsigned int mode); }; -static u32 phy_intf_sels[] = { - [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, - [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, - [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, - [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, - [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, - [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, -}; - enum { TX_RETIME_SRC_NA = 0, TX_RETIME_SRC_TXCLK = 1, @@ -159,19 +146,28 @@ static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode) stih4xx_tx_retime_val[src]); } -static int sti_dwmac_set_mode(struct sti_dwmac *dwmac) +static int sti_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) { - struct regmap *regmap = dwmac->regmap; - int iface = dwmac->interface; - u32 reg = dwmac->ctrl_reg; - u32 val; + struct sti_dwmac *dwmac = bsp_priv; + struct regmap *regmap; + u32 reg, val; + + regmap = dwmac->regmap; + reg = dwmac->ctrl_reg; if (dwmac->gmac_en) regmap_update_bits(regmap, reg, EN_MASK, EN); - regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_SGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) + phy_intf_sel = PHY_INTF_SEL_GMII_MII; + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, + FIELD_PREP(MII_PHY_SEL_MASK, phy_intf_sel)); - val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; + val = (dwmac->interface == PHY_INTERFACE_MODE_REVMII) ? 
0 : ENMII; regmap_update_bits(regmap, reg, ENMII_MASK, val); dwmac->fix_retime_src(dwmac, dwmac->speed, 0); @@ -236,17 +232,8 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv) { struct sti_dwmac *dwmac = bsp_priv; - int ret; - - ret = clk_prepare_enable(dwmac->clk); - if (ret) - return ret; - - ret = sti_dwmac_set_mode(dwmac); - if (ret) - clk_disable_unprepare(dwmac->clk); - return ret; + return clk_prepare_enable(dwmac->clk); } static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv) @@ -291,6 +278,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) dwmac->fix_retime_src = data->fix_retime_src; plat_dat->bsp_priv = dwmac; + plat_dat->set_phy_intf_sel = sti_set_phy_intf_sel; plat_dat->fix_mac_speed = data->fix_retime_src; plat_dat->init = sti_dwmac_init; plat_dat->exit = sti_dwmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 6c179911ef3f..e1b260ed4790 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -47,23 +47,18 @@ *------------------------------------------ */ #define SYSCFG_PMCR_ETH_SEL_MII BIT(20) -#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) -#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) -#define SYSCFG_PMCR_ETH_SEL_GMII 0 +#define SYSCFG_PMCR_PHY_INTF_SEL_MASK GENMASK(23, 21) #define SYSCFG_MCU_ETH_SEL_MII 0 #define SYSCFG_MCU_ETH_SEL_RMII 1 /* STM32MP2 register definitions */ #define SYSCFG_MP2_ETH_MASK GENMASK(31, 0) +#define SYSCFG_ETHCR_ETH_SEL_MASK GENMASK(6, 4) #define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2) #define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1) #define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0) -#define SYSCFG_ETHCR_ETH_SEL_MII 0 -#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4) -#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6) - /* STM32MPx register definitions * * Below table summarizes the clock requirement and clock sources for @@ -232,11 +227,14 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat) return -EINVAL; } -static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) +static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat, + u8 phy_intf_sel) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val = 0; + int val; + + val = FIELD_PREP(SYSCFG_PMCR_PHY_INTF_SEL_MASK, phy_intf_sel); switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_MII: @@ -250,12 +248,10 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) val |= SYSCFG_PMCR_ETH_SEL_MII; break; case PHY_INTERFACE_MODE_GMII: - val = SYSCFG_PMCR_ETH_SEL_GMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; break; case PHY_INTERFACE_MODE_RMII: - val = SYSCFG_PMCR_ETH_SEL_RMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; break; @@ -263,7 +259,6 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - val = SYSCFG_PMCR_ETH_SEL_RGMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; break; @@ -288,18 +283,20 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) dwmac->mode_mask, val); } -static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) +static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat, + u8 phy_intf_sel) { struct stm32_dwmac *dwmac = 
plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val = 0; + int val; + + val = FIELD_PREP(SYSCFG_ETHCR_ETH_SEL_MASK, phy_intf_sel); switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_MII: /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */ break; case PHY_INTERFACE_MODE_RMII: - val = SYSCFG_ETHCR_ETH_SEL_RMII; if (dwmac->enable_eth_ck) { /* Internal clock ETH_CLK of 50MHz from RCC is used */ val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL; @@ -309,8 +306,6 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - val = SYSCFG_ETHCR_ETH_SEL_RGMII; - fallthrough; case PHY_INTERFACE_MODE_GMII: if (dwmac->enable_eth_ck) { /* Internal clock ETH_CLK of 125MHz from RCC is used */ @@ -337,7 +332,7 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - int ret; + int phy_intf_sel, ret; ret = stm32mp1_select_ethck_external(plat_dat); if (ret) @@ -347,10 +342,19 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { + dev_err(dwmac->dev, "Mode %s not supported\n", + phy_modes(plat_dat->phy_interface)); + return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL; + } + if (!dwmac->ops->is_mp2) - return stm32mp1_configure_pmcr(plat_dat); + return stm32mp1_configure_pmcr(plat_dat, phy_intf_sel); else - return stm32mp2_configure_syscfg(plat_dat); + return stm32mp2_configure_syscfg(plat_dat, phy_intf_sel); } static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 5d871b2cd111..7434d4bbb526 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1040,15 +1040,10 @@ static const struct stmmac_ops sun8i_dwmac_ops = { .set_mac_loopback = sun8i_dwmac_set_mac_loopback, }; -static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) +static int sun8i_dwmac_setup(void *ppriv, struct mac_device_info *mac) { - struct mac_device_info *mac; struct stmmac_priv *priv = ppriv; - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); - if (!mac) - return NULL; - mac->pcsr = priv->ioaddr; mac->mac = &sun8i_dwmac_ops; mac->dma = &sun8i_dwmac_dma_ops; @@ -1079,7 +1074,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) /* Synopsys Id is not available */ priv->synopsys_id = 0; - return mac; + return 0; } static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node) @@ -1192,7 +1187,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; - plat_dat->setup = sun8i_dwmac_setup; + plat_dat->mac_setup = sun8i_dwmac_setup; plat_dat->tx_fifo_size = 4096; plat_dat->rx_fifo_size = 16384; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 1eadcf5d1ad6..7f560d78209d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -136,7 +136,7 @@ static int 
sun7i_gmac_probe(struct platform_device *pdev) /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. */ plat_dat->tx_coe = 1; - plat_dat->has_gmac = true; + plat_dat->core_type = DWMAC_CORE_GMAC; plat_dat->bsp_priv = gmac; plat_dat->init = sun7i_gmac_init; plat_dat->exit = sun7i_gmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index dc903b846b1b..d765acbe3754 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -308,7 +308,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) goto disable_clks; } - plat->has_xgmac = 1; + plat->core_type = DWMAC_CORE_XGMAC; plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; plat->bsp_priv = mgbe; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index bd65d4239054..9497b13a5753 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -42,10 +42,6 @@ #define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN) -#define ETHER_CONFIG_INTF_MII 0 -#define ETHER_CONFIG_INTF_RGMII BIT(0) -#define ETHER_CONFIG_INTF_RMII BIT(2) - struct visconti_eth { void __iomem *reg; struct clk *phy_ref_clk; @@ -150,22 +146,12 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac { struct visconti_eth *dwmac = plat_dat->bsp_priv; unsigned int clk_sel_val; - u32 phy_intf_sel; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - phy_intf_sel = ETHER_CONFIG_INTF_RGMII; - break; - case PHY_INTERFACE_MODE_MII: - phy_intf_sel = ETHER_CONFIG_INTF_MII; - break; - case PHY_INTERFACE_MODE_RMII: - phy_intf_sel = ETHER_CONFIG_INTF_RMII; - break; - default: + int phy_intf_sel; + + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 0c011a47d5a3..697bba641e05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -38,11 +38,10 @@ #define GMAC_INT_DISABLE_PCSAN BIT(2) #define GMAC_INT_DISABLE_PMT BIT(3) #define GMAC_INT_DISABLE_TIMESTAMP BIT(9) -#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_RGMII | \ GMAC_INT_DISABLE_PCSLINK | \ - GMAC_INT_DISABLE_PCSAN) -#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ - GMAC_INT_DISABLE_PCS) + GMAC_INT_DISABLE_PCSAN | \ + GMAC_INT_DISABLE_TIMESTAMP) /* PMT Control and Status */ #define GMAC_PMT 0x0000002c diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fe776ddf6889..a2ae136d2c0e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -22,47 +22,35 @@ #include "stmmac_ptp.h" #include "dwmac1000.h" +static int dwmac1000_pcs_init(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.pcs) + return 0; 
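	/*
	 * stmmac_integrated_pcs_init() is introduced elsewhere in this
	 * series: it takes the PCS base offset plus the interrupt-mask
	 * bits covering PCS link/AN events (the dwmac4 variant below
	 * passes GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE), presumably so the
	 * PCS code can toggle exactly those bits through the new
	 * irq_modify callback.
	 */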
+ + return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE, + GMAC_INT_DISABLE_PCSLINK | + GMAC_INT_DISABLE_PCSAN); +} + static void dwmac1000_core_init(struct mac_device_info *hw, struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_CONTROL); int mtu = dev->mtu; + u32 value; /* Configure GMAC core */ - value |= GMAC_CORE_INIT; + value = readl(ioaddr + GMAC_CONTROL); if (mtu > 1500) value |= GMAC_CONTROL_2K; if (mtu > 2000) value |= GMAC_CONTROL_JE; - if (hw->ps) { - value |= GMAC_CONTROL_TE; - - value &= ~hw->link.speed_mask; - switch (hw->ps) { - case SPEED_1000: - value |= hw->link.speed1000; - break; - case SPEED_100: - value |= hw->link.speed100; - break; - case SPEED_10: - value |= hw->link.speed10; - break; - } - } - - writel(value, ioaddr + GMAC_CONTROL); + writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONTROL); /* Mask GMAC interrupts */ - value = GMAC_INT_DEFAULT_MASK; - - if (hw->pcs) - value &= ~GMAC_INT_DISABLE_PCS; - - writel(value, ioaddr + GMAC_INT_MASK); + writel(GMAC_INT_DEFAULT_MASK, ioaddr + GMAC_INT_MASK); #ifdef STMMAC_VLAN_TAG_USED /* Tag detection without filtering */ @@ -70,6 +58,20 @@ static void dwmac1000_core_init(struct mac_device_info *hw, #endif } +static void dwmac1000_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + GMAC_INT_MASK; + unsigned long flags; + u32 value; + + spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) | disable; + value &= ~enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); +} + static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) { void __iomem *ioaddr = hw->pcsr; @@ -263,39 +265,6 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) writel(pmt, ioaddr + GMAC_PMT); } -/* RGMII or SMII interface */ -static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) -{ - u32 status; - - status = readl(ioaddr + GMAC_RGSMIIIS); - x->irq_rgmii_n++; - - /* Check the link status */ - if (status & GMAC_RGSMIIIS_LNKSTS) { - int speed_value; - - x->pcs_link = 1; - - speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> - GMAC_RGSMIIIS_SPEED_SHIFT); - if (speed_value == GMAC_RGSMIIIS_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_RGSMIIIS_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; - - x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); - - pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, - x->pcs_duplex ? 
"Full" : "Half"); - } else { - x->pcs_link = 0; - pr_info("Link is Down\n"); - } -} - static int dwmac1000_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -337,9 +306,6 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - if (intr_status & PCS_RGSMIIIS_IRQ) - dwmac1000_rgsmii(ioaddr, x); - return ret; } @@ -394,9 +360,9 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) } static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane, - bool srgmi_ral, bool loopback) + bool srgmi_ral) { - dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral); } static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -488,7 +454,9 @@ static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable) } const struct stmmac_ops dwmac1000_ops = { + .pcs_init = dwmac1000_pcs_init, .core_init = dwmac1000_core_init, + .irq_modify = dwmac1000_irq_modify, .set_mac = stmmac_set_mac, .rx_ipc = dwmac1000_rx_ipc_enable, .dump_regs = dwmac1000_dump_regs, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 3dec1a264cf6..3cb733781e1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -106,9 +106,6 @@ #define GMAC_INT_LPI_EN BIT(5) #define GMAC_INT_TSIE BIT(12) -#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ - GMAC_INT_PCS_ANE) - #define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \ GMAC_INT_TSIE) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index d85bc0bb5c3c..a4282fd7c3c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -22,51 +22,51 @@ #include "dwmac4.h" #include "dwmac5.h" +static int dwmac4_pcs_init(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.pcs) + return 0; + + return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE, + GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE); +} + static void dwmac4_core_init(struct mac_device_info *hw, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_CONFIG); unsigned long clk_rate; + u32 value; - value |= GMAC_CORE_INIT; - - if (hw->ps) { - value |= GMAC_CONFIG_TE; - - value &= hw->link.speed_mask; - switch (hw->ps) { - case SPEED_1000: - value |= hw->link.speed1000; - break; - case SPEED_100: - value |= hw->link.speed100; - break; - case SPEED_10: - value |= hw->link.speed10; - break; - } - } - - writel(value, ioaddr + GMAC_CONFIG); + value = readl(ioaddr + GMAC_CONFIG); + writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONFIG); /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */ clk_rate = clk_get_rate(priv->plat->stmmac_clk); writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER); /* Enable GMAC interrupts */ - value = GMAC_INT_DEFAULT_ENABLE; - - if (hw->pcs) - value |= GMAC_PCS_IRQ_DEFAULT; - - writel(value, ioaddr + GMAC_INT_EN); + writel(GMAC_INT_DEFAULT_ENABLE, ioaddr + GMAC_INT_EN); if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) init_waitqueue_head(&priv->tstamp_busy_wait); } +static void dwmac4_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + GMAC_INT_EN; + unsigned long flags; + 
u32 value; + + spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) & ~disable; + value |= enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); +} + static void dwmac4_update_caps(struct stmmac_priv *priv) { if (priv->plat->tx_queues_to_use > 1) @@ -583,43 +583,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } } -static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral, - bool loopback) -{ - dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); -} - -/* RGMII or SMII interface */ -static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) +static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral) { - u32 status; - - status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); - x->irq_rgmii_n++; - - /* Check the link status */ - if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { - int speed_value; - - x->pcs_link = 1; - - speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> - GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); - if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; - - x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD); - - pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, - x->pcs_duplex ? "Full" : "Half"); - } else { - x->pcs_link = 0; - pr_info("Link is Down\n"); - } + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral); } static int dwmac4_irq_mtl_status(struct stmmac_priv *priv, @@ -693,8 +659,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw, } dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - if (intr_status & PCS_RGSMIIIS_IRQ) - dwmac4_phystatus(ioaddr, x); return ret; } @@ -929,7 +893,9 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, } const struct stmmac_ops dwmac4_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -963,7 +929,9 @@ const struct stmmac_ops dwmac4_ops = { }; const struct stmmac_ops dwmac410_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -999,7 +967,9 @@ const struct stmmac_ops dwmac410_ops = { }; const struct stmmac_ops dwmac510_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0d408ee17f33..e48cfa05000c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -79,6 +79,7 @@ #define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_PSRQ_SHIFT(x) ((x) * 8) #define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_INT_TSIS BIT(12) #define XGMAC_LPIIS BIT(5) #define XGMAC_PMTIS BIT(4) #define XGMAC_INT_EN 0x000000b4 @@ -173,6 +174,8 @@ #define XGMAC_MDIO_ADDR 0x00000200 #define XGMAC_MDIO_DATA 0x00000204 #define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_GPIO_STATUS 0x0000027c +#define XGMAC_GPIO_GPO0 BIT(16) #define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) #define 
XGMAC_ADDR_MAX 32 #define XGMAC_AE BIT(31) @@ -220,6 +223,8 @@ #define XGMAC_OB BIT(0) #define XGMAC_RSS_DATA 0x00000c8c #define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25 #define XGMAC_TXTSC BIT(15) #define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 #define XGMAC_TXTSSTSLO GENMASK(30, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 00e929bf280b..b40b3ea50e25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -23,30 +23,23 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, tx = readl(ioaddr + XGMAC_TX_CONFIG); rx = readl(ioaddr + XGMAC_RX_CONFIG); - tx |= XGMAC_CORE_INIT_TX; - rx |= XGMAC_CORE_INIT_RX; - - if (hw->ps) { - tx |= XGMAC_CONFIG_TE; - tx &= ~hw->link.speed_mask; + writel(tx | XGMAC_CORE_INIT_TX, ioaddr + XGMAC_TX_CONFIG); + writel(rx | XGMAC_CORE_INIT_RX, ioaddr + XGMAC_RX_CONFIG); + writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); +} - switch (hw->ps) { - case SPEED_10000: - tx |= hw->link.xgmii.speed10000; - break; - case SPEED_2500: - tx |= hw->link.speed2500; - break; - case SPEED_1000: - default: - tx |= hw->link.speed1000; - break; - } - } +static void dwxgmac2_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + XGMAC_INT_EN; + unsigned long flags; + u32 value; - writel(tx, ioaddr + XGMAC_TX_CONFIG); - writel(rx, ioaddr + XGMAC_RX_CONFIG); - writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); + spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) & ~disable; + value |= enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); } static void dwxgmac2_update_caps(struct stmmac_priv *priv) @@ -1432,6 +1425,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, + .irq_modify = dwxgmac2_irq_modify, .update_caps = dwxgmac2_update_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, @@ -1487,6 +1481,7 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, const struct stmmac_ops dwxlgmac2_ops = { .core_init = dwxgmac2_core_init, + .irq_modify = dwxgmac2_irq_modify, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxlgmac2_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 3f7c765dcb79..014f7cd79a3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -13,31 +13,42 @@ #include "dwmac4_descs.h" #include "dwxgmac2.h" -static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +struct stmmac_version { + u8 snpsver; + u8 dev_id; +}; + +static void stmmac_get_version(struct stmmac_priv *priv, + struct stmmac_version *ver) { - u32 reg = readl(priv->ioaddr + id_reg); + enum dwmac_core_type core_type = priv->plat->core_type; + unsigned int version_offset; + u32 version; - if (!reg) { - dev_info(priv->device, "Version ID not available\n"); - return 0x0; - } + ver->snpsver = 0; + ver->dev_id = 0; - dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", - (unsigned int)(reg & GENMASK(15, 8)) >> 8, - (unsigned int)(reg & GENMASK(7, 0))); - return reg & GENMASK(7, 0); -} + if (core_type == DWMAC_CORE_MAC100) + return; -static u32 stmmac_get_dev_id(struct 
stmmac_priv *priv, u32 id_reg) -{ - u32 reg = readl(priv->ioaddr + id_reg); + if (core_type == DWMAC_CORE_GMAC) + version_offset = GMAC_VERSION; + else + version_offset = GMAC4_VERSION; - if (!reg) { + version = readl(priv->ioaddr + version_offset); + if (version == 0) { dev_info(priv->device, "Version ID not available\n"); - return 0x0; + return; } - return (reg & GENMASK(15, 8)) >> 8; + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + FIELD_GET(DWMAC_USERVER, version), + FIELD_GET(DWMAC_SNPSVER, version)); + + ver->snpsver = FIELD_GET(DWMAC_SNPSVER, version); + if (core_type == DWMAC_CORE_XGMAC) + ver->dev_id = FIELD_GET(DWMAC_USERVER, version); } static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) @@ -92,12 +103,10 @@ static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv) return 0; } -int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr) +int stmmac_reset(struct stmmac_priv *priv) { - struct plat_stmmacenet_data *plat = priv ? priv->plat : NULL; - - if (!priv) - return -EINVAL; + struct plat_stmmacenet_data *plat = priv->plat; + void __iomem *ioaddr = priv->ioaddr; if (plat && plat->fix_soc_reset) return plat->fix_soc_reset(priv, ioaddr); @@ -106,9 +115,7 @@ int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr) } static const struct stmmac_hwif_entry { - bool gmac; - bool gmac4; - bool xgmac; + enum dwmac_core_type core_type; u32 min_id; u32 dev_id; const struct stmmac_regs_off regs; @@ -127,9 +134,7 @@ static const struct stmmac_hwif_entry { } stmmac_hw[] = { /* NOTE: New HW versions shall go to the end of this table */ { - .gmac = false, - .gmac4 = false, - .xgmac = false, + .core_type = DWMAC_CORE_MAC100, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -146,9 +151,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac100_setup, .quirks = stmmac_dwmac1_quirks, }, { - .gmac = true, - .gmac4 = false, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -165,9 +168,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac1000_setup, .quirks = stmmac_dwmac1_quirks, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = 0, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -187,9 +188,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_4_00, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -210,9 +209,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_4_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -233,9 +230,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_5_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -256,9 +251,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = false, - .xgmac = true, + .core_type = DWMAC_CORE_XGMAC, .min_id = DWXGMAC_CORE_2_10, .dev_id = DWXGMAC_ID, .regs = { @@ -280,9 +273,7 @@ static const struct stmmac_hwif_entry { .setup = dwxgmac2_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = false, - .xgmac = true, + .core_type = DWMAC_CORE_XGMAC, .min_id = DWXLGMAC_CORE_2_00, .dev_id = DWXLGMAC_ID, .regs = { @@ 
-306,100 +297,114 @@ static const struct stmmac_hwif_entry { }, }; +static const struct stmmac_hwif_entry * +stmmac_hwif_find(enum dwmac_core_type core_type, u8 snpsver, u8 dev_id) +{ + const struct stmmac_hwif_entry *entry; + int i; + + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (core_type != entry->core_type) + continue; + /* Use synopsys_id var because some setups can override this */ + if (snpsver < entry->min_id) + continue; + if (core_type == DWMAC_CORE_XGMAC && + dev_id != entry->dev_id) + continue; + + return entry; + } + + return NULL; +} + int stmmac_hwif_init(struct stmmac_priv *priv) { - bool needs_xgmac = priv->plat->has_xgmac; - bool needs_gmac4 = priv->plat->has_gmac4; - bool needs_gmac = priv->plat->has_gmac; + enum dwmac_core_type core_type = priv->plat->core_type; const struct stmmac_hwif_entry *entry; + struct stmmac_version version; struct mac_device_info *mac; bool needs_setup = true; - u32 id, dev_id = 0; - int i, ret; - - if (needs_gmac) { - id = stmmac_get_id(priv, GMAC_VERSION); - } else if (needs_gmac4 || needs_xgmac) { - id = stmmac_get_id(priv, GMAC4_VERSION); - if (needs_xgmac) - dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION); - } else { - id = 0; - } + int ret; + + stmmac_get_version(priv, &version); /* Save ID for later use */ - priv->synopsys_id = id; + priv->synopsys_id = version.snpsver; /* Lets assume some safe values first */ - priv->ptpaddr = priv->ioaddr + - (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); - priv->mmcaddr = priv->ioaddr + - (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); - if (needs_gmac4) + if (core_type == DWMAC_CORE_GMAC4) { + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; + priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET; - else if (needs_xgmac) - priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET; - - /* Check for HW specific setup first */ - if (priv->plat->setup) { - mac = priv->plat->setup(priv); - needs_setup = false; } else { - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; + priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + if (core_type == DWMAC_CORE_XGMAC) + priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET; } + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); if (!mac) return -ENOMEM; + /* Check for HW specific setup first */ + if (priv->plat->mac_setup) { + ret = priv->plat->mac_setup(priv, mac); + if (ret) + return ret; + + needs_setup = false; + } + + spin_lock_init(&mac->irq_ctrl_lock); + /* Fallback to generic HW */ - for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { - entry = &stmmac_hw[i]; - if (needs_gmac ^ entry->gmac) - continue; - if (needs_gmac4 ^ entry->gmac4) - continue; - if (needs_xgmac ^ entry->xgmac) - continue; - /* Use synopsys_id var because some setups can override this */ - if (priv->synopsys_id < entry->min_id) - continue; - if (needs_xgmac && (dev_id ^ entry->dev_id)) - continue; + /* Use synopsys_id var because some setups can override this */ + entry = stmmac_hwif_find(core_type, priv->synopsys_id, version.dev_id); + if (!entry) { + dev_err(priv->device, + "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + version.snpsver, core_type == DWMAC_CORE_GMAC, + core_type == DWMAC_CORE_GMAC4); - /* Only use generic HW helpers if needed */ - mac->desc = mac->desc ? : entry->desc; - mac->dma = mac->dma ? : entry->dma; - mac->mac = mac->mac ? : entry->mac; - mac->ptp = mac->ptp ? : entry->hwtimestamp; - mac->mode = mac->mode ? 
: entry->mode; - mac->tc = mac->tc ? : entry->tc; - mac->mmc = mac->mmc ? : entry->mmc; - mac->est = mac->est ? : entry->est; - mac->vlan = mac->vlan ? : entry->vlan; - - priv->hw = mac; - priv->fpe_cfg.reg = entry->regs.fpe_reg; - priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; - priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; - memcpy(&priv->ptp_clock_ops, entry->ptp, - sizeof(struct ptp_clock_info)); - if (entry->est) - priv->estaddr = priv->ioaddr + entry->regs.est_off; - - /* Entry found */ - if (needs_setup) { - ret = entry->setup(priv); - if (ret) - return ret; - } + return -EINVAL; + } - /* Save quirks, if needed for posterior use */ - priv->hwif_quirks = entry->quirks; - return 0; + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; + mac->mmc = mac->mmc ? : entry->mmc; + mac->est = mac->est ? : entry->est; + mac->vlan = mac->vlan ? : entry->vlan; + + priv->hw = mac; + priv->fpe_cfg.reg = entry->regs.fpe_reg; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + memcpy(&priv->ptp_clock_ops, entry->ptp, + sizeof(struct ptp_clock_info)); + + if (entry->est) + priv->estaddr = priv->ioaddr + entry->regs.est_off; + + /* Entry found */ + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; } - dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", - id, needs_gmac, needs_gmac4); - return -EINVAL; + /* Save quirks, if needed for posterior use */ + priv->hwif_quirks = entry->quirks; + + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 14dbe0685997..d359722100fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -313,10 +313,14 @@ enum stmmac_lpi_mode { /* Helpers to program the MAC core */ struct stmmac_ops { + /* Initialise any PCS instances */ + int (*pcs_init)(struct stmmac_priv *priv); /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); /* Update MAC capabilities */ void (*update_caps)(struct stmmac_priv *priv); + /* Change the interrupt enable setting. Enable takes precedence. */ + void (*irq_modify)(struct mac_device_info *hw, u32 disable, u32 enable); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -374,8 +378,8 @@ struct stmmac_ops { struct stmmac_extra_stats *x, u32 rx_queues, u32 tx_queues); /* PCS calls */ - void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, bool srgmi_ral, - bool loopback); + void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, + bool srgmi_ral); /* Safety Features */ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_feature_cfg *safety_cfg); @@ -413,10 +417,14 @@ struct stmmac_ops { u32 pclass); }; +#define stmmac_mac_pcs_init(__priv) \ + stmmac_do_callback(__priv, mac, pcs_init, __priv) #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) #define stmmac_mac_update_caps(__priv) \ stmmac_do_void_callback(__priv, mac, update_caps, __priv) +#define stmmac_mac_irq_modify(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, irq_modify, (__priv)->hw, __args) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) \ @@ -690,7 +698,7 @@ extern const struct stmmac_tc_ops dwmac510_tc_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ -int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr); +int stmmac_reset(struct stmmac_priv *priv); int stmmac_hwif_init(struct stmmac_priv *priv); #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 7ca5477be390..0ea74c88a779 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -25,6 +25,8 @@ #include <net/xdp.h> #include <uapi/linux/bpf.h> +struct stmmac_pcs; + struct stmmac_resources { void __iomem *addr; u8 mac[ETH_ALEN]; @@ -257,6 +259,7 @@ struct stmmac_priv { u32 sarc_type; u32 rx_riwt[MTL_MAX_RX_QUEUES]; int hwts_rx_en; + bool tsfupdt_coarse; void __iomem *ioaddr; struct net_device *dev; @@ -273,6 +276,8 @@ struct stmmac_priv { unsigned int pause_time; struct mii_bus *mii; + struct stmmac_pcs *integrated_pcs; + struct phylink_config phylink_config; struct phylink *phylink; @@ -287,6 +292,7 @@ struct stmmac_priv { int hw_cap_support; int synopsys_id; u32 msg_enable; + /* Our MAC Wake-on-Lan options */ int wolopts; int wol_irq; u32 gmii_address_bus_config; @@ -364,6 +370,8 @@ struct stmmac_priv { /* XDP BPF Program */ unsigned long *af_xdp_zc_qps; struct bpf_prog *xdp_prog; + + struct devlink *devlink; }; enum stmmac_state { @@ -375,19 +383,11 @@ enum stmmac_state { extern const struct dev_pm_ops stmmac_simple_pm_ops; -static inline bool stmmac_wol_enabled_mac(struct stmmac_priv *priv) -{ - return priv->plat->pmt && device_may_wakeup(priv->device); -} - -static inline bool stmmac_wol_enabled_phy(struct stmmac_priv *priv) -{ - return !priv->plat->pmt && device_may_wakeup(priv->device); -} - int stmmac_mdio_unregister(struct net_device *ndev); int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); +void stmmac_mdio_lock(struct stmmac_priv *priv); +void stmmac_mdio_unlock(struct stmmac_priv *priv); int stmmac_pcs_setup(struct net_device *ndev); void stmmac_pcs_clean(struct net_device *ndev); void stmmac_set_ethtool_ops(struct net_device *netdev); @@ -396,6 +396,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_xdp_open(struct net_device *dev); void stmmac_xdp_release(struct net_device *dev); +int stmmac_get_phy_intf_sel(phy_interface_t interface); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); void stmmac_dvr_remove(struct device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c index 4b513d27a988..afc516059b89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c @@ -53,7 +53,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg, } ctrl = readl(est_addr + EST_CONTROL); - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { ctrl &= ~EST_XGMAC_PTOV; ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) << EST_XGMAC_PTOV_SHIFT; @@ -148,7 +148,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct 
net_device *dev, } if (status & EST_BTRE) { - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { btrl = FIELD_GET(EST_XGMAC_BTRL, status); btrl_max = FIELD_MAX(EST_XGMAC_BTRL); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 39fa1ec92f82..b155e71aac51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -303,9 +303,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (priv->plat->has_gmac || priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC || + priv->plat->core_type == DWMAC_CORE_GMAC4) strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); - else if (priv->plat->has_xgmac) + else if (priv->plat->core_type == DWMAC_CORE_XGMAC) strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); else strscpy(info->driver, MAC100_ETHTOOL_NAME, @@ -322,47 +323,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) && - (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII)) { - u32 supported, advertising, lp_advertising; - - if (!priv->xstats.pcs_link) { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - return 0; - } - cmd->base.duplex = priv->xstats.pcs_duplex; - - cmd->base.speed = priv->xstats.pcs_speed; - - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ - - ethtool_convert_link_mode_to_legacy_u32( - &supported, cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32( - &advertising, cmd->link_modes.advertising); - ethtool_convert_link_mode_to_legacy_u32( - &lp_advertising, cmd->link_modes.lp_advertising); - - /* Reg49[3] always set because ANE is always supported */ - cmd->base.autoneg = ADVERTISED_Autoneg; - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; - lp_advertising |= ADVERTISED_Autoneg; - - cmd->base.port = PORT_OTHER; - - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.advertising, advertising); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.lp_advertising, lp_advertising); - - return 0; - } - return phylink_ethtool_ksettings_get(priv->phylink, cmd); } @@ -372,20 +332,6 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) && - (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII)) { - /* Only support ANE */ - if (cmd->base.autoneg != AUTONEG_ENABLE) - return -EINVAL; - - mutex_lock(&priv->lock); - stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); - mutex_unlock(&priv->lock); - - return 0; - } - return phylink_ethtool_ksettings_set(priv->phylink, cmd); } @@ -406,9 +352,9 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) return XGMAC_REGSIZE * 4; - else if (priv->plat->has_gmac4) + else if (priv->plat->core_type == DWMAC_CORE_GMAC4) return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -423,12 +369,12 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); /* 
Copy DMA registers to where ethtool expects them */ - if (priv->plat->has_gmac4) { + if (priv->plat->core_type == DWMAC_CORE_GMAC4) { /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], NUM_DWMAC4_DMA_REGS * 4); - } else if (!priv->plat->has_xgmac) { + } else if (priv->plat->core_type != DWMAC_CORE_XGMAC) { memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); @@ -479,11 +425,7 @@ stmmac_get_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->hw->pcs) { - pause->autoneg = 1; - } else { - phylink_ethtool_get_pauseparam(priv->phylink, pause); - } + phylink_ethtool_get_pauseparam(priv->phylink, pause); } static int @@ -492,12 +434,7 @@ stmmac_set_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->hw->pcs) { - pause->autoneg = 1; - return 0; - } else { - return phylink_ethtool_set_pauseparam(priv->phylink, pause); - } + return phylink_ethtool_set_pauseparam(priv->phylink, pause); } static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) @@ -787,41 +724,14 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - if (!priv->plat->pmt) - return phylink_ethtool_get_wol(priv->phylink, wol); - - mutex_lock(&priv->lock); - if (device_can_wakeup(priv->device)) { - wol->supported = WAKE_MAGIC | WAKE_UCAST; - if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame) - wol->supported &= ~WAKE_MAGIC; - wol->wolopts = priv->wolopts; - } - mutex_unlock(&priv->lock); + return phylink_ethtool_get_wol(priv->phylink, wol); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - if (!device_can_wakeup(priv->device)) - return -EOPNOTSUPP; - - if (!priv->plat->pmt) { - int ret = phylink_ethtool_set_wol(priv->phylink, wol); - - if (!ret) - device_set_wakeup_enable(priv->device, !!wol->wolopts); - return ret; - } - - device_set_wakeup_enable(priv->device, !!wol->wolopts); - - mutex_lock(&priv->lock); - priv->wolopts = wol->wolopts; - mutex_unlock(&priv->lock); - - return 0; + return phylink_ethtool_set_wol(priv->phylink, wol); } static int stmmac_ethtool_op_get_eee(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c index 75b470ee621a..c54c70224351 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c @@ -70,8 +70,10 @@ static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enabl struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg); const struct stmmac_fpe_reg *reg = cfg->reg; void __iomem *ioaddr = priv->ioaddr; + unsigned long flags; u32 value; + spin_lock_irqsave(&priv->hw->irq_ctrl_lock, flags); value = readl(ioaddr + reg->int_en_reg); if (pmac_enable) { @@ -86,6 +88,7 @@ static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enabl } writel(value, ioaddr + reg->int_en_reg); + spin_unlock_irqrestore(&priv->hw->irq_ctrl_lock, flags); } static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b90ecd3a55e..d202f604161e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -40,12 +40,14 @@ #include <linux/phylink.h> #include <linux/udp.h> #include <linux/bpf_trace.h> +#include <net/devlink.h> #include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac_fpe.h" #include "stmmac.h" +#include "stmmac_pcs.h" #include "stmmac_xdp.h" #include <linux/reset.h> #include <linux/of_mdio.h> @@ -57,8 +59,7 @@ * with fine resolution and binary rollover. This avoid non-monotonic behavior * (clock jumps) when changing timestamping settings at runtime. */ -#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ - PTP_TCR_TSCTRLSSR) +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR) #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -147,6 +148,15 @@ static void stmmac_exit_fs(struct net_device *dev); #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) +struct stmmac_devlink_priv { + struct stmmac_priv *stmmac_priv; +}; + +enum stmmac_dl_param_id { + STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + STMMAC_DEVLINK_PARAM_ID_TS_COARSE, +}; + /** * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock * @bsp_priv: BSP private data structure (unused) @@ -445,7 +455,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (!priv->hwts_rx_en) return; /* For GMAC4, the valid timestamp is from CTX next desc. */ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + if (dwmac_is_xmac(priv->plat->core_type)) desc = np; /* Check if timestamp is available */ @@ -463,6 +473,33 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, } } +static void stmmac_update_subsecond_increment(struct stmmac_priv *priv) +{ + bool xmac = dwmac_is_xmac(priv->plat->core_type); + u32 sec_inc = 0; + u64 temp = 0; + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); +} + /** * stmmac_hwtstamp_set - control hardware timestamping. * @dev: device pointer. 
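The addend arithmetic in stmmac_update_subsecond_increment() above is easiest to sanity-check with concrete numbers. A minimal sketch, assuming a hypothetical 20 ns sub-second increment and a 100 MHz clk_ptp_rate; example_addend() is illustrative only and not part of the driver:

#include <stdint.h>

/* Mirrors the formula above: freq_div_ratio = 1e9 / sec_inc, which is
 * 5e7 for sec_inc = 20 ns, and addend = (freq_div_ratio << 32) /
 * clk_ptp_rate = 0x80000000 at 100 MHz. The 32-bit accumulator then
 * overflows on every second PTP clock cycle (100 MHz * 0.5 = 50 MHz),
 * and each overflow adds sec_inc to the system time:
 * 50 MHz * 20 ns = exactly one second per second.
 */
static uint32_t example_addend(uint32_t sec_inc_ns, uint64_t clk_ptp_rate)
{
	uint64_t freq_div_ratio = 1000000000ULL / sec_inc_ns;

	return (freq_div_ratio << 32) / clk_ptp_rate;
}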
@@ -647,6 +684,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON; priv->systime_flags = STMMAC_HWTS_ACTIVE; + if (!priv->tsfupdt_coarse) + priv->systime_flags |= PTP_TCR_TSCFUPDT; if (priv->hwts_tx_en || priv->hwts_rx_en) { priv->systime_flags |= tstamp_all | ptp_v2 | @@ -696,10 +735,7 @@ static int stmmac_hwtstamp_get(struct net_device *dev, static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) { - bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; struct timespec64 now; - u32 sec_inc = 0; - u64 temp = 0; if (!priv->plat->clk_ptp_rate) { netdev_err(priv->dev, "Invalid PTP clock rate"); @@ -709,23 +745,7 @@ static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, priv->ptpaddr, - priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment for later use */ - priv->sub_second_inc = sec_inc; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + stmmac_update_subsecond_increment(priv); /* initialize system time */ ktime_get_real_ts64(&now); @@ -745,7 +765,7 @@ static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, */ static int stmmac_init_timestamping(struct stmmac_priv *priv) { - bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + bool xmac = dwmac_is_xmac(priv->plat->core_type); int ret; if (priv->plat->ptp_clk_freq_config) @@ -756,7 +776,8 @@ static int stmmac_init_timestamping(struct stmmac_priv *priv) return -EOPNOTSUPP; } - ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE | + PTP_TCR_TSCFUPDT); if (ret) { netdev_warn(priv->dev, "PTP init failed\n"); return ret; @@ -850,6 +871,13 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, return pcs; } + /* The PCS control register is only relevant for SGMII, TBI and RTBI + * modes. We no longer support TBI or RTBI, so only configure this + * register when operating in SGMII mode with the integrated PCS. 
+ */ + if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs) + return &priv->integrated_pcs->pcs; + return NULL; } @@ -859,6 +887,18 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, /* Nothing to do, xpcs_config() handles everything */ } +static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + if (priv->plat->mac_finish) + priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); + + return 0; +} + static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -1053,14 +1093,16 @@ static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, return 0; } -static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) +static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts, + const u8 *sopass) { - struct net_device *ndev = to_net_dev(config->dev); - struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); - if (priv->plat->mac_finish) - priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); + device_set_wakeup_enable(priv->device, !!wolopts); + + mutex_lock(&priv->lock); + priv->wolopts = wolopts; + mutex_unlock(&priv->lock); return 0; } @@ -1069,11 +1111,12 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, + .mac_finish = stmmac_mac_finish, .mac_link_down = stmmac_mac_link_down, .mac_link_up = stmmac_mac_link_up, .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi, .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi, - .mac_finish = stmmac_mac_finish, + .mac_wol_set = stmmac_mac_wol_set, }; /** @@ -1086,17 +1129,25 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { static void stmmac_check_pcs_mode(struct stmmac_priv *priv) { int interface = priv->plat->phy_interface; + int speed = priv->plat->mac_port_sel_speed; + + if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) { + netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_SGMII; + + switch (speed) { + case SPEED_10: + case SPEED_100: + case SPEED_1000: + priv->hw->reverse_sgmii_enable = true; + break; - if (priv->dma_cap.pcs) { - if ((interface == PHY_INTERFACE_MODE_RGMII) || - (interface == PHY_INTERFACE_MODE_RGMII_ID) || - (interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { - netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); - priv->hw->pcs = STMMAC_PCS_RGMII; - } else if (interface == PHY_INTERFACE_MODE_SGMII) { - netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); - priv->hw->pcs = STMMAC_PCS_SGMII; + default: + dev_warn(priv->device, "invalid port speed\n"); + fallthrough; + case 0: + priv->hw->reverse_sgmii_enable = false; + break; } } } @@ -1174,18 +1225,10 @@ static int stmmac_init_phy(struct net_device *dev) phylink_ethtool_set_eee(priv->phylink, &eee); } - if (!priv->plat->pmt) { - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; - - phylink_ethtool_get_wol(priv->phylink, &wol); - device_set_wakeup_capable(priv->device, !!wol.supported); - device_set_wakeup_enable(priv->device, !!wol.wolopts); - } - return 0; } -static int stmmac_phy_setup(struct stmmac_priv *priv) +static int 
stmmac_phylink_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; struct phylink_config *config; @@ -1250,6 +1293,16 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) config->eee_enabled_default = true; } + config->wol_phy_speed_ctrl = true; + if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) { + config->wol_phy_legacy = true; + } else { + if (priv->dma_cap.pmt_remote_wake_up) + config->wol_mac_support |= WAKE_UCAST; + if (priv->dma_cap.pmt_magic_frame) + config->wol_mac_support |= WAKE_MAGIC; + } + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1339,9 +1392,9 @@ static unsigned int stmmac_rx_offset(struct stmmac_priv *priv) return NET_SKB_PAD; } -static int stmmac_set_bfsize(int mtu, int bufsize) +static int stmmac_set_bfsize(int mtu) { - int ret = bufsize; + int ret; if (mtu >= BUF_SIZE_8KiB) ret = BUF_SIZE_16KiB; @@ -2397,7 +2450,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) txfifosz = priv->dma_cap.tx_fifo_size; /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + if (dwmac_is_xmac(priv->plat->core_type)) { rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; } @@ -3029,6 +3082,56 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) } } +int stmmac_get_phy_intf_sel(phy_interface_t interface) +{ + int phy_intf_sel = -EINVAL; + + if (interface == PHY_INTERFACE_MODE_MII || + interface == PHY_INTERFACE_MODE_GMII) + phy_intf_sel = PHY_INTF_SEL_GMII_MII; + else if (phy_interface_mode_is_rgmii(interface)) + phy_intf_sel = PHY_INTF_SEL_RGMII; + else if (interface == PHY_INTERFACE_MODE_SGMII) + phy_intf_sel = PHY_INTF_SEL_SGMII; + else if (interface == PHY_INTERFACE_MODE_RMII) + phy_intf_sel = PHY_INTF_SEL_RMII; + else if (interface == PHY_INTERFACE_MODE_REVMII) + phy_intf_sel = PHY_INTF_SEL_REVMII; + + return phy_intf_sel; +} +EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel); + +static int stmmac_prereset_configure(struct stmmac_priv *priv) +{ + struct plat_stmmacenet_data *plat_dat = priv->plat; + phy_interface_t interface; + int phy_intf_sel, ret; + + if (!plat_dat->set_phy_intf_sel) + return 0; + + interface = plat_dat->phy_interface; + phy_intf_sel = stmmac_get_phy_intf_sel(interface); + if (phy_intf_sel < 0) { + netdev_err(priv->dev, + "failed to get phy_intf_sel for %s: %pe\n", + phy_modes(interface), ERR_PTR(phy_intf_sel)); + return phy_intf_sel; + } + + ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel); + if (ret == -EINVAL) + netdev_err(priv->dev, "platform does not support %s\n", + phy_modes(interface)); + else if (ret < 0) + netdev_err(priv->dev, + "platform failed to set interface %s: %pe\n", + phy_modes(interface), ERR_PTR(ret)); + + return ret; +} + /** * stmmac_init_dma_engine - DMA init. 
* @priv: driver private structure @@ -3055,7 +3158,11 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) priv->plat->dma_cfg->atds = 1; - ret = stmmac_reset(priv, priv->ioaddr); + ret = stmmac_prereset_configure(priv); + if (ret) + return ret; + + ret = stmmac_reset(priv); if (ret) { netdev_err(priv->dev, "Failed to reset the dma\n"); return ret; @@ -3443,19 +3550,6 @@ static int stmmac_hw_setup(struct net_device *dev) stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); phylink_rx_clk_stop_unblock(priv->phylink); - /* PS and related bits will be programmed according to the speed */ - if (priv->hw->pcs) { - int speed = priv->plat->mac_port_sel_speed; - - if ((speed == SPEED_10) || (speed == SPEED_100) || - (speed == SPEED_1000)) { - priv->hw->ps = speed; - } else { - dev_warn(priv->device, "invalid port speed\n"); - priv->hw->ps = 0; - } - } - /* Initialize the MAC Core */ stmmac_core_init(priv, priv->hw, dev); @@ -3492,9 +3586,6 @@ static int stmmac_hw_setup(struct net_device *dev) } } - if (priv->hw->pcs) - stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); - /* set TX and RX rings length */ stmmac_set_rings_length(priv); @@ -3867,12 +3958,13 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) return ERR_PTR(-ENOMEM); } + /* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */ bfsize = stmmac_set_16kib_bfsize(priv, mtu); if (bfsize < 0) bfsize = 0; if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(mtu, 0); + bfsize = stmmac_set_bfsize(mtu); dma_conf->dma_buf_sz = bfsize; /* Chose the tx/rx size from the already defined one in the @@ -3963,8 +4055,6 @@ static int __stmmac_open(struct net_device *dev, stmmac_init_coalesce(priv); phylink_start(priv->phylink); - /* We may have called phylink_speed_down before */ - phylink_speed_up(priv->phylink); ret = stmmac_request_irq(dev); if (ret) @@ -4015,6 +4105,9 @@ static int stmmac_open(struct net_device *dev) kfree(dma_conf); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); + return ret; err_disconnect_phy: @@ -4032,13 +4125,6 @@ static void __stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; - /* If the PHY or MAC has WoL enabled, then the PHY will not be - * suspended when phylink_stop() is called below. Set the PHY - * to its slowest speed to save power. - */ - if (device_may_wakeup(priv->device)) - phylink_speed_down(priv->phylink, false); - /* Stop and disconnect the PHY */ phylink_stop(priv->phylink); @@ -4078,6 +4164,13 @@ static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + /* If the PHY or MAC has WoL enabled, then the PHY will not be + * suspended when phylink_stop() is called below. Set the PHY + * to its slowest speed to save power. 
+ */ + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + __stmmac_release(dev); phylink_disconnect_phy(priv->phylink); @@ -4513,7 +4606,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (skb_is_gso(skb) && priv->tso) { if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) return stmmac_tso_xmit(skb, dev); - if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) + if (priv->plat->core_type == DWMAC_CORE_GMAC4 && + (gso & SKB_GSO_UDP_L4)) return stmmac_tso_xmit(skb, dev); } @@ -5971,7 +6065,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) u32 queue; bool xmac; - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + xmac = dwmac_is_xmac(priv->plat->core_type); queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; if (priv->irq_wake) @@ -5985,7 +6079,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) stmmac_fpe_irq_status(priv); /* To handle GMAC own interrupts */ - if ((priv->plat->has_gmac) || xmac) { + if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); if (unlikely(status)) { @@ -5999,15 +6093,6 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) for (queue = 0; queue < queues_count; queue++) stmmac_host_mtl_irq_status(priv, priv->hw, queue); - /* PCS link status */ - if (priv->hw->pcs && - !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { - if (priv->xstats.pcs_link) - netif_carrier_on(priv->dev); - else - netif_carrier_off(priv->dev); - } - stmmac_timestamp_interrupt(priv, priv); } } @@ -6355,7 +6440,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.mbps_1000) ? "Y" : "N"); seq_printf(seq, "\tHalf duplex: %s\n", (priv->dma_cap.half_duplex) ? "Y" : "N"); - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { seq_printf(seq, "\tNumber of Additional MAC address registers: %d\n", priv->dma_cap.multi_addr); @@ -6379,7 +6464,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.time_stamp) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", (priv->dma_cap.atime_stamp) ? "Y" : "N"); - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) seq_printf(seq, "\tTimestamp System Time Source: %s\n", dwxgmac_timestamp_source[priv->dma_cap.tssrc]); seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", @@ -6388,7 +6473,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); if (priv->synopsys_id >= DWMAC_CORE_4_00 || - priv->plat->has_xgmac) { + priv->plat->core_type == DWMAC_CORE_XGMAC) { seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", (priv->dma_cap.rx_coe) ? "Y" : "N"); } else { @@ -7240,13 +7325,21 @@ static int stmmac_hw_init(struct stmmac_priv *priv) * has to be disable and this can be done by passing the * riwt_off field from the platform. */ - if (((priv->synopsys_id >= DWMAC_CORE_3_50) || - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + if ((priv->synopsys_id >= DWMAC_CORE_3_50 || + priv->plat->core_type == DWMAC_CORE_XGMAC) && + !priv->plat->riwt_off) { priv->use_riwt = 1; dev_info(priv->device, "Enable RX Mitigation via HW Watchdog Timer\n"); } + /* Unimplemented PCS init (as indicated by stmmac_do_callback() + * perversely returning -EINVAL) is non-fatal. 
+ */ + ret = stmmac_mac_pcs_init(priv); + if (ret != -EINVAL) + return ret; + return 0; } @@ -7355,7 +7448,7 @@ static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) return -ENODATA; /* For GMAC4, the valid timestamp is from CTX next desc. */ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + if (dwmac_is_xmac(priv->plat->core_type)) desc_contains_ts = ndesc; /* Check if timestamp is available */ @@ -7373,6 +7466,95 @@ static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, }; +static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct stmmac_devlink_priv *dl_priv = devlink_priv(dl); + struct stmmac_priv *priv = dl_priv->stmmac_priv; + + priv->tsfupdt_coarse = ctx->val.vbool; + + if (priv->tsfupdt_coarse) + priv->systime_flags &= ~PTP_TCR_TSCFUPDT; + else + priv->systime_flags |= PTP_TCR_TSCFUPDT; + + /* In Coarse mode, we can use a smaller subsecond increment, let's + * reconfigure the systime, subsecond increment and addend. + */ + stmmac_update_subsecond_increment(priv); + + return 0; +} + +static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct stmmac_devlink_priv *dl_priv = devlink_priv(dl); + struct stmmac_priv *priv = dl_priv->stmmac_priv; + + ctx->val.vbool = priv->tsfupdt_coarse; + + return 0; +} + +static const struct devlink_param stmmac_devlink_params[] = { + DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + stmmac_dl_ts_coarse_get, + stmmac_dl_ts_coarse_set, NULL), +}; + +/* None of the generic devlink parameters are implemented */ +static const struct devlink_ops stmmac_devlink_ops = {}; + +static int stmmac_register_devlink(struct stmmac_priv *priv) +{ + struct stmmac_devlink_priv *dl_priv; + int ret; + + /* For now, what is exposed over devlink is only relevant when + * timestamping is available and we have a valid ptp clock rate + */ + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) || + !priv->plat->clk_ptp_rate) + return 0; + + priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv), + priv->device); + if (!priv->devlink) + return -ENOMEM; + + dl_priv = devlink_priv(priv->devlink); + dl_priv->stmmac_priv = priv; + + ret = devlink_params_register(priv->devlink, stmmac_devlink_params, + ARRAY_SIZE(stmmac_devlink_params)); + if (ret) + goto dl_free; + + devlink_register(priv->devlink); + return 0; + +dl_free: + devlink_free(priv->devlink); + + return ret; +} + +static void stmmac_unregister_devlink(struct stmmac_priv *priv) +{ + if (!priv->devlink) + return; + + devlink_unregister(priv->devlink); + devlink_params_unregister(priv->devlink, stmmac_devlink_params, + ARRAY_SIZE(stmmac_devlink_params)); + devlink_free(priv->devlink); +} + /** * stmmac_dvr_probe * @device: device pointer @@ -7511,7 +7693,7 @@ int stmmac_dvr_probe(struct device *device, if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) ndev->hw_features |= NETIF_F_GSO_UDP_L4; priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); @@ -7564,7 +7746,7 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= 
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + if (dwmac_is_xmac(priv->plat->core_type)) { ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; priv->hw->hw_vlan_en = true; } @@ -7592,22 +7774,23 @@ int stmmac_dvr_probe(struct device *device, /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; - if (priv->plat->has_xgmac) + + if (priv->plat->core_type == DWMAC_CORE_XGMAC) ndev->max_mtu = XGMAC_JUMBO_LEN; - else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00) ndev->max_mtu = JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); - /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu - * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. + + /* Warn if the platform's maxmtu is smaller than the minimum MTU, + * otherwise clamp the maximum MTU above to the platform's maxmtu. */ - if ((priv->plat->maxmtu < ndev->max_mtu) && - (priv->plat->maxmtu >= ndev->min_mtu)) - ndev->max_mtu = priv->plat->maxmtu; - else if (priv->plat->maxmtu < ndev->min_mtu) + if (priv->plat->maxmtu < ndev->min_mtu) dev_warn(priv->device, "%s: warning: maxmtu having invalid value (%d)\n", __func__, priv->plat->maxmtu); + else if (priv->plat->maxmtu < ndev->max_mtu) + ndev->max_mtu = priv->plat->maxmtu; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@ -7637,12 +7820,16 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_pcs_setup; - ret = stmmac_phy_setup(priv); + ret = stmmac_phylink_setup(priv); if (ret) { netdev_err(ndev, "failed to setup phy (%d)\n", ret); goto error_phy_setup; } + ret = stmmac_register_devlink(priv); + if (ret) + goto error_devlink_setup; + ret = register_netdev(ndev); if (ret) { dev_err(priv->device, "%s: ERROR %i registering the device\n", @@ -7665,6 +7852,8 @@ int stmmac_dvr_probe(struct device *device, return ret; error_netdev_register: + stmmac_unregister_devlink(priv); +error_devlink_setup: phylink_destroy(priv->phylink); error_phy_setup: stmmac_pcs_clean(ndev); @@ -7701,6 +7890,8 @@ void stmmac_dvr_remove(struct device *dev) #ifdef CONFIG_DEBUG_FS stmmac_exit_fs(ndev); #endif + stmmac_unregister_devlink(priv); + phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); @@ -7755,7 +7946,7 @@ int stmmac_suspend(struct device *dev) priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); /* Enable Power down mode by programming the PMT regs */ - if (stmmac_wol_enabled_mac(priv)) { + if (priv->wolopts) { stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { @@ -7766,10 +7957,7 @@ int stmmac_suspend(struct device *dev) mutex_unlock(&priv->lock); rtnl_lock(); - if (stmmac_wol_enabled_phy(priv)) - phylink_speed_down(priv->phylink, false); - - phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv)); + phylink_suspend(priv->phylink, !!priv->wolopts); rtnl_unlock(); if (stmmac_fpe_supported(priv)) @@ -7845,7 +8033,7 @@ int stmmac_resume(struct device *dev) * this bit because it can generate problems while resuming * from another devices (e.g. serial console). */ - if (stmmac_wol_enabled_mac(priv)) { + if (priv->wolopts) { mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); mutex_unlock(&priv->lock); @@ -7907,9 +8095,6 @@ int stmmac_resume(struct device *dev) * workqueue thread, which will race with initialisation. 
*/ phylink_resume(priv->phylink); - if (stmmac_wol_enabled_phy(priv)) - phylink_speed_up(priv->phylink); - rtnl_unlock(); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index f408737f6fc7..1e82850f2a25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -301,7 +301,7 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg) struct stmmac_priv *priv = netdev_priv(bus->priv); u32 cmd; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) cmd = MII_GMAC4_READ; else cmd = 0; @@ -344,7 +344,7 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(bus->priv); u32 cmd; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) cmd = MII_GMAC4_WRITE; else cmd = MII_ADDR_GWRITE; @@ -417,7 +417,7 @@ int stmmac_mdio_reset(struct mii_bus *bus) * on MDC, so perform a dummy mdio read. To be updated for GMAC4 * if needed. */ - if (!priv->plat->has_gmac4) + if (priv->plat->core_type != DWMAC_CORE_GMAC4) writel(0, priv->ioaddr + mii_address); #endif return 0; @@ -528,7 +528,7 @@ static u32 stmmac_clk_csr_set(struct stmmac_priv *priv) value = 0; } - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { if (clk_rate > 400000000) value = 0x5; else if (clk_rate > 350000000) @@ -583,8 +583,9 @@ int stmmac_mdio_register(struct net_device *ndev) struct device_node *mdio_node = priv->plat->mdio_node; struct device *dev = ndev->dev.parent; struct fwnode_handle *fixed_node; + int max_addr = PHY_MAX_ADDR - 1; struct fwnode_handle *fwnode; - int addr, found, max_addr; + struct phy_device *phydev; if (!mdio_bus_data) return 0; @@ -600,7 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->name = "stmmac"; - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { new_bus->read = &stmmac_xgmac2_mdio_read_c22; new_bus->write = &stmmac_xgmac2_mdio_write_c22; new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45; @@ -608,25 +609,20 @@ int stmmac_mdio_register(struct net_device *ndev) if (priv->synopsys_id < DWXGMAC_CORE_2_20) { /* Right now only C22 phys are supported */ - max_addr = MII_XGMAC_MAX_C22ADDR + 1; + max_addr = MII_XGMAC_MAX_C22ADDR; /* Check if DT specified an unsupported phy addr */ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) dev_err(dev, "Unsupported phy_addr (max=%d)\n", MII_XGMAC_MAX_C22ADDR); - } else { - /* XGMAC version 2.20 onwards support 32 phy addr */ - max_addr = PHY_MAX_ADDR; } } else { new_bus->read = &stmmac_mdio_read_c22; new_bus->write = &stmmac_mdio_write_c22; - if (priv->plat->has_gmac4) { + if (priv->plat->core_type == DWMAC_CORE_GMAC4) { new_bus->read_c45 = &stmmac_mdio_read_c45; new_bus->write_c45 = &stmmac_mdio_write_c45; } - - max_addr = PHY_MAX_ADDR; } if (mdio_bus_data->needs_reset) @@ -649,7 +645,7 @@ int stmmac_mdio_register(struct net_device *ndev) } /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0); /* If fixed-link is set, skip PHY scanning */ @@ -668,41 +664,31 @@ int stmmac_mdio_register(struct net_device *ndev) if (priv->plat->phy_node || mdio_node) goto bus_register_done; - found = 0; - for (addr = 0; addr < max_addr; addr++) { - struct phy_device *phydev = mdiobus_get_phy(new_bus, 
addr); - - if (!phydev) - continue; - - /* - * If an IRQ was provided to be assigned after - * the bus probe, do it here. - */ - if (!mdio_bus_data->irqs && - (mdio_bus_data->probed_phy_irq > 0)) { - new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; - phydev->irq = mdio_bus_data->probed_phy_irq; - } - - /* - * If we're going to bind the MAC to this PHY bus, - * and no PHY number was provided to the MAC, - * use the one probed here. - */ - if (priv->plat->phy_addr == -1) - priv->plat->phy_addr = addr; - - phy_attached_info(phydev); - found = 1; - } - - if (!found && !mdio_node) { + phydev = phy_find_first(new_bus); + if (!phydev || phydev->mdio.addr > max_addr) { dev_warn(dev, "No PHY found\n"); err = -ENODEV; goto no_phy_found; } + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if (!mdio_bus_data->irqs && mdio_bus_data->probed_phy_irq > 0) { + new_bus->irq[phydev->mdio.addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, and no PHY number + * was provided to the MAC, use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = phydev->mdio.addr; + + phy_attached_info(phydev); + bus_register_done: priv->mii = new_bus; @@ -734,3 +720,17 @@ int stmmac_mdio_unregister(struct net_device *ndev) return 0; } + +void stmmac_mdio_lock(struct stmmac_priv *priv) +{ + if (priv->mii) + mutex_lock(&priv->mii->mdio_lock); +} +EXPORT_SYMBOL_GPL(stmmac_mdio_lock); + +void stmmac_mdio_unlock(struct stmmac_priv *priv) +{ + if (priv->mii) + mutex_unlock(&priv->mii->mdio_lock); +} +EXPORT_SYMBOL_GPL(stmmac_mdio_unlock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 4e3aa611fda8..94b3a3b27270 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -23,7 +23,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat) { /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->clk_csr = STMMAC_CSR_20_35M; - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->force_sf_dma_mode = 1; plat->mdio_bus_data->needs_reset = true; @@ -76,7 +76,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, int i; plat->clk_csr = STMMAC_CSR_250_300M; - plat->has_gmac4 = 1; + plat->core_type = DWMAC_CORE_GMAC4; plat->force_sf_dma_mode = 1; plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c new file mode 100644 index 000000000000..e2f531c11986 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "stmmac.h" +#include "stmmac_pcs.h" + +static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + stmmac_mac_irq_modify(spcs->priv, 0, spcs->int_mask); + + return 0; +} + +static void dwmac_integrated_pcs_disable(struct phylink_pcs *pcs) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + stmmac_mac_irq_modify(spcs->priv, spcs->int_mask, 0); +} + +static void dwmac_integrated_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, + struct phylink_link_state *state) +{ + state->link = false; +} + +static int dwmac_integrated_pcs_config(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, + const unsigned 
long *advertising, + bool permit_pause_to_mac) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + dwmac_ctrl_ane(spcs->base, 0, 1, spcs->priv->hw->reverse_sgmii_enable); + + return 0; +} + +static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = { + .pcs_enable = dwmac_integrated_pcs_enable, + .pcs_disable = dwmac_integrated_pcs_disable, + .pcs_get_state = dwmac_integrated_pcs_get_state, + .pcs_config = dwmac_integrated_pcs_config, +}; + +int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset, + u32 int_mask) +{ + struct stmmac_pcs *spcs; + + spcs = devm_kzalloc(priv->device, sizeof(*spcs), GFP_KERNEL); + if (!spcs) + return -ENOMEM; + + spcs->priv = priv; + spcs->base = priv->ioaddr + offset; + spcs->int_mask = int_mask; + spcs->pcs.ops = &dwmac_integrated_pcs_ops; + + __set_bit(PHY_INTERFACE_MODE_SGMII, spcs->pcs.supported_interfaces); + + priv->integrated_pcs = spcs; + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index 4a684c97dfae..cda93894168e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -9,6 +9,7 @@ #ifndef __STMMAC_PCS_H__ #define __STMMAC_PCS_H__ +#include <linux/phylink.h> #include <linux/slab.h> #include <linux/io.h> #include "common.h" @@ -46,6 +47,24 @@ #define GMAC_ANE_RFE_SHIFT 12 #define GMAC_ANE_ACK BIT(14) +struct stmmac_priv; + +struct stmmac_pcs { + struct stmmac_priv *priv; + void __iomem *base; + u32 int_mask; + struct phylink_pcs pcs; +}; + +static inline struct stmmac_pcs * +phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct stmmac_pcs, pcs); +} + +int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset, + u32 int_mask); + /** * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR * @ioaddr: IO registers pointer @@ -82,13 +101,12 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, * @reg: Base address of the AN Control Register. * @ane: to enable the auto-negotiation * @srgmi_ral: to manage MAC-2-MAC SGMII connections. - * @loopback: to cause the PHY to loopback tx data into rx path. * Description: this is the main function to configure the AN control register - * and init the ANE, select loopback (usually for debugging purpose) and - * configure SGMII RAL. + * and init the ANE and configure SGMII RAL.
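+ * + * A minimal, hypothetical call for a MAC-2-MAC SGMII link (ANE enabled, SGMII RAL enabled, AN control register block at offset 0) would be: + * dwmac_ctrl_ane(ioaddr, 0, true, true);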
*/ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, - bool srgmi_ral, bool loopback) + bool srgmi_ral) { u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); @@ -104,9 +122,6 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, if (srgmi_ral) value |= GMAC_AN_CTRL_SGMRAL; - if (loopback) - value |= GMAC_AN_CTRL_ELE; - writel(value, ioaddr + GMAC_AN_CTRL(reg)); } #endif /* __STMMAC_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 27bcaae07a7f..6483d52b4c0f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -552,12 +552,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) &pdev->dev, plat->unicast_filter_entries); plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( &pdev->dev, plat->multicast_filter_bins); - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->pmt = 1; } if (of_device_is_compatible(np, "snps,dwmac-3.40a")) { - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->enh_desc = 1; plat->tx_coe = 1; plat->bugged_jumbo = 1; @@ -565,8 +565,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) } if (of_device_compatible_match(np, stmmac_gmac4_compats)) { - plat->has_gmac4 = 1; - plat->has_gmac = 0; + plat->core_type = DWMAC_CORE_GMAC4; plat->pmt = 1; if (of_property_read_bool(np, "snps,tso")) plat->flags |= STMMAC_FLAG_TSO_EN; @@ -580,7 +579,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) } if (of_device_is_compatible(np, "snps,dwxgmac")) { - plat->has_xgmac = 1; + plat->core_type = DWMAC_CORE_XGMAC; plat->pmt = 1; if (of_property_read_bool(np, "snps,tso")) plat->flags |= STMMAC_FLAG_TSO_EN; @@ -970,7 +969,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) if (!netif_running(ndev)) return 0; - if (!stmmac_wol_enabled_mac(priv)) { + if (!priv->wolopts) { /* Disable clock in case of PWM is off */ clk_disable_unprepare(priv->plat->clk_ptp_ref); @@ -991,7 +990,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (!netif_running(ndev)) return 0; - if (!stmmac_wol_enabled_mac(priv)) { + if (!priv->wolopts) { /* enable the clk previously disabled */ ret = pm_runtime_force_resume(dev); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 993ff4e87e55..3e30172fa129 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -57,7 +57,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) bool xmac, est_rst = false; int ret; - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + xmac = dwmac_is_xmac(priv->plat->core_type); if (delta < 0) { neg_adj = 1; @@ -344,7 +344,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv) /* Calculate the clock domain crossing (CDC) error if necessary */ priv->plat->cdc_error_adj = 0; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; /* Update the ptp clock parameters based on feature discovery, when diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 3b4d4696afe9..d78652718599 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -262,10 
+262,10 @@ static int tc_init(struct stmmac_priv *priv) unsigned int count; int ret, i; - if (dma_cap->l3l4fnum) { - priv->flow_entries_max = dma_cap->l3l4fnum; + priv->flow_entries_max = dma_cap->l3l4fnum; + if (priv->flow_entries_max) { priv->flow_entries = devm_kcalloc(priv->device, - dma_cap->l3l4fnum, + priv->flow_entries_max, sizeof(*priv->flow_entries), GFP_KERNEL); if (!priv->flow_entries) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 110eb2da8dbc..d5f358ec9820 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1788,28 +1788,28 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, } static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype; - struct hwtstamp_config cfg; - if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) + if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) { + NL_SET_ERR_MSG(extack, "Time stamping is not supported"); return -EOPNOTSUPP; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + } /* TX HW timestamp */ - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; default: + NL_SET_ERR_MSG(extack, "TX mode is not supported"); return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: port->rx_ts_enabled = false; break; @@ -1826,17 +1826,19 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: port->rx_ts_enabled = true; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_NTP_ALL: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -EOPNOTSUPP; default: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -ERANGE; } - port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON); + port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON); /* cfg TX timestamp */ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET << @@ -1872,25 +1874,24 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2); writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); - struct hwtstamp_config cfg; if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = port->tx_ts_enabled ? + cfg->flags = 0; + cfg->tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | + cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, @@ -1901,13 +1902,6 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - switch (cmd) { - case SIOCSHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_set(ndev, req); - case SIOCGHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_get(ndev, req); - } - return phylink_mii_ioctl(port->slave.phylink, req, cmd); } @@ -1991,6 +1985,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, .ndo_bpf = am65_cpsw_ndo_bpf, .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, + .ndo_hwtstamp_get = am65_cpsw_nuss_hwtstamp_get, + .ndo_hwtstamp_set = am65_cpsw_nuss_hwtstamp_set, }; static void am65_cpsw_disable_phy(struct phy *phy) diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 68507126be8e..48f85a3649b2 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -234,7 +234,6 @@ static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) ret = mdiobb_read_c22(bus, phy, reg); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -251,7 +250,6 @@ static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, ret = mdiobb_write_c22(bus, phy, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -268,7 +266,6 @@ static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_read_c45(bus, phy, devad, reg); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -285,7 +282,6 @@ static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_write_c45(bus, phy, devad, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -332,7 +328,6 @@ static int davinci_mdio_common_reset(struct davinci_mdio_data *data) data->bus->phy_mask = phy_mask; done: - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return 0; @@ -441,7 +436,6 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; } @@ -478,7 +472,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; @@ -548,8 +541,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; - int ret, addr; int autosuspend_delay_ms = -1; + int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -652,14 +645,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) goto bail_out; /* scan and dump the bus */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = mdiobus_get_phy(data->bus, addr); - if (phy) { - dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->mdio.addr, phydev_name(phy), - phy->drv ? phy->drv->name : "unknown"); - } - } + mdiobus_for_each_phy(data->bus, phy) + dev_info(dev, "phy[%d]: device %s, driver %s\n", + phy->mdio.addr, phydev_name(phy), + phy->drv ? 
phy->drv->name : "unknown"); return 0; diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 57e5f1c88f50..0eed29d6187a 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -1223,15 +1223,13 @@ void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) } EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout); -static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: emac->tx_ts_enabled = 0; break; @@ -1242,7 +1240,7 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: emac->rx_ts_enabled = 0; break; @@ -1262,43 +1260,28 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: emac->rx_ts_enabled = 1; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config); -static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - config.flags = 0; - config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return emac_get_ts_config(ndev, ifr); - case SIOCSHWTSTAMP: - return emac_set_ts_config(ndev, ifr); - default: - break; - } + config->flags = 0; + config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = emac->rx_ts_enabled ? 
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - return phy_do_ioctl(ndev, ifr, cmd); + return 0; } -EXPORT_SYMBOL_GPL(icssg_ndo_ioctl); +EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index e42d0fdefee1..57a7d1ceab08 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1168,7 +1168,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, .ndo_fix_features = emac_ndo_fix_features, @@ -1176,6 +1176,8 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, .ndo_bpf = emac_ndo_bpf, .ndo_xdp_xmit = emac_xdp_xmit, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, }; static int prueth_netdev_init(struct prueth *prueth, @@ -1248,8 +1250,7 @@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index ca8a22a4a5da..f0fa9688d9a0 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -479,7 +479,11 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable); void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config); +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 5e225310c9de..7bb4f0d850cc 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -747,9 +747,11 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, }; static int prueth_netdev_init(struct prueth *prueth, @@ -816,8 +818,7 @@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to 
register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 7007eb8bed36..b9cbd3b4a8a2 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -207,6 +207,11 @@ struct netcp_module { int (*del_vid)(void *intf_priv, int vid); int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); int (*set_rx_mode)(void *intf_priv, bool promisc); + int (*hwtstamp_get)(void *intf_priv, + struct kernel_hwtstamp_config *cfg); + int (*hwtstamp_set)(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); /* used internally */ struct list_head module_list; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 5ee13db568f0..5ed1c46bbcb1 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1781,6 +1781,62 @@ static int netcp_ndo_stop(struct net_device *ndev) return 0; } +static int netcp_ndo_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_get) + continue; + + err = module->hwtstamp_get(intf_modpriv->module_priv, config); + break; + } + + return err; +} + +static int netcp_ndo_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int ret = -1, err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_set) + continue; + + err = module->hwtstamp_set(intf_modpriv->module_priv, config, + extack); + if ((err < 0) && (err != -EOPNOTSUPP)) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "At least one module failed to setup HW timestamps"); + ret = err; + goto out; + } + if (err == 0) + ret = err; + } + +out: + return (ret == 0) ? 
0 : err; +} + static int netcp_ndo_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { @@ -1952,6 +2008,8 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_tx_timeout = netcp_ndo_tx_timeout, .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, + .ndo_hwtstamp_get = netcp_ndo_hwtstamp_get, + .ndo_hwtstamp_set = netcp_ndo_hwtstamp_set, }; static int netcp_create_interface(struct netcp_device *netcp_device, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 55a1a96cd834..4f6cc6cd1f03 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2591,20 +2591,26 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info) return 0; } -static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_get(void *intf_priv, struct kernel_hwtstamp_config *cfg) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; + + gbe_dev = gbe_intf->gbe_dev; - if (!cpts) + if (!gbe_dev->cpts) + return -EOPNOTSUPP; + + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = gbe_dev->rx_ts_enabled; + cfg->flags = 0; + cfg->tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = gbe_dev->rx_ts_enabled; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_hwtstamp(struct gbe_intf *gbe_intf) @@ -2637,19 +2643,23 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf) writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2)); } -static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; - if (!cpts) + gbe_dev = gbe_intf->gbe_dev; + + if (!gbe_dev->cpts) return -EOPNOTSUPP; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) + return phy->mii_ts->hwtstamp(phy->mii_ts, cfg, extack); - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: gbe_dev->tx_ts_enabled = 0; break; @@ -2660,7 +2670,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE; break; @@ -2668,7 +2678,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -2680,7 +2690,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; - 
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; @@ -2688,7 +2698,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) gbe_hwtstamp(gbe_intf); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_register_cpts(struct gbe_priv *gbe_dev) @@ -2745,12 +2755,15 @@ static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev) { } -static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_get(void *intf_priv, + struct kernel_hwtstamp_config *cfg) { return -EOPNOTSUPP; } -static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_set(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -2816,15 +2829,6 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) struct gbe_intf *gbe_intf = intf_priv; struct phy_device *phy = gbe_intf->slave->phy; - if (!phy_has_hwtstamp(phy)) { - switch (cmd) { - case SIOCGHWTSTAMP: - return gbe_hwtstamp_get(gbe_intf, req); - case SIOCSHWTSTAMP: - return gbe_hwtstamp_set(gbe_intf, req); - } - } - if (phy) return phy_mii_ioctl(phy, req, cmd); @@ -3824,6 +3828,8 @@ static struct netcp_module gbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static struct netcp_module xgbe_module = { @@ -3841,6 +3847,8 @@ static struct netcp_module xgbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static int __init keystone_gbe_init(void) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 06f401bd975c..9aa3964187e1 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -51,6 +51,11 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = { WX_STAT("fdir_miss", stats.fdirmiss), }; +static const struct wx_stats wx_gstrings_rsc_stats[] = { + WX_STAT("rsc_aggregated", rsc_count), + WX_STAT("rsc_flushed", rsc_flush), +}; + /* drivers allocates num_tx_queues and num_rx_queues symmetrically so * we set the num_rx_queues to evaluate to num_tx_queues. This is * used because we do not have a good way to get the max number of @@ -64,16 +69,21 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = { (sizeof(struct wx_queue_stats) / sizeof(u64))) #define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) #define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats) +#define WX_RSC_STATS_LEN ARRAY_SIZE(wx_gstrings_rsc_stats) #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) int wx_get_sset_count(struct net_device *netdev, int sset) { struct wx *wx = netdev_priv(netdev); + int len = WX_STATS_LEN; switch (sset) { case ETH_SS_STATS: - return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ? 
- WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + len += WX_FDIR_STATS_LEN; + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + len += WX_RSC_STATS_LEN; + return len; default: return -EOPNOTSUPP; } @@ -94,6 +104,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) for (i = 0; i < WX_FDIR_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); } + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + for (i = 0; i < WX_RSC_STATS_LEN; i++) + ethtool_puts(&p, wx_gstrings_rsc_stats[i].stat_string); + } for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -131,6 +145,13 @@ void wx_get_ethtool_stats(struct net_device *netdev, } } + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + for (k = 0; k < WX_RSC_STATS_LEN; k++) { + p = (char *)wx + wx_gstrings_rsc_stats[k].stat_offset; + data[i++] = *(u64 *)p; + } + } + for (j = 0; j < netdev->num_tx_queues; j++) { ring = wx->tx_ring[j]; if (!ring) { @@ -322,6 +343,40 @@ int wx_get_coalesce(struct net_device *netdev, } EXPORT_SYMBOL(wx_get_coalesce); +static void wx_update_rsc(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + bool need_reset = false; + + /* nothing to do if LRO or RSC are not enabled */ + if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags) || + !(netdev->features & NETIF_F_LRO)) + return; + + /* check the feature flag value and enable RSC if necessary */ + if (wx->rx_itr_setting == 1 || + wx->rx_itr_setting > WX_MIN_RSC_ITR) { + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + set_bit(WX_FLAG_RSC_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, + "rx-usecs value high enough to re-enable RSC\n"); + + need_reset = true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, + "rx-usecs set too low, disabling RSC\n"); + + need_reset = true; + } + + /* reset the device to apply the new RSC setting */ + if (need_reset && wx->do_reset) + wx->do_reset(netdev); +} + int wx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, @@ -414,6 +469,8 @@ int wx_set_coalesce(struct net_device *netdev, wx_write_eitr(q_vector); } + wx_update_rsc(wx); + return 0; } EXPORT_SYMBOL(wx_set_coalesce); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index b37d6cfbfbe9..58b8300e3d2c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1779,7 +1779,9 @@ EXPORT_SYMBOL(wx_set_rx_mode); static void wx_set_rx_buffer_len(struct wx *wx) { struct net_device *netdev = wx->netdev; + struct wx_ring *rx_ring; u32 mhadd, max_frame; + int i; max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; /* adjust max frame to be at least the size of a standard frame */ @@ -1789,6 +1791,19 @@ static void wx_set_rx_buffer_len(struct wx *wx) mhadd = rd32(wx, WX_PSR_MAX_SZ); if (max_frame != mhadd) wr32(wx, WX_PSR_MAX_SZ, max_frame); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < wx->num_rx_queues; i++) { + rx_ring = wx->rx_ring[i]; + rx_ring->rx_buf_len = WX_RXBUFFER_2K; +#if (PAGE_SIZE < 8192) + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + rx_ring->rx_buf_len = WX_RXBUFFER_3K; +#endif + } } /** @@ -1865,11 
+1880,27 @@ static void wx_configure_srrctl(struct wx *wx, srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; + srrctl |= rx_ring->rx_buf_len >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); } +static void wx_configure_rscctl(struct wx *wx, + struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rscctrl; + + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + return; + + rscctrl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + rscctrl |= WX_PX_RR_CFG_RSC; + rscctrl |= WX_PX_RR_CFG_MAX_RSCBUF_16; + + wr32(wx, WX_PX_RR_CFG(reg_idx), rscctrl); +} + static void wx_configure_tx_ring(struct wx *wx, struct wx_ring *ring) { @@ -1905,6 +1936,15 @@ static void wx_configure_tx_ring(struct wx *wx, memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); + if (ring->headwb_mem) { + wr32(wx, WX_PX_TR_HEAD_ADDRL(reg_idx), + ring->headwb_dma & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_TR_HEAD_ADDRH(reg_idx), + upper_32_bits(ring->headwb_dma)); + + txdctl |= WX_PX_TR_CFG_HEAD_WB; + } + /* enable queue */ wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); @@ -1935,6 +1975,10 @@ static void wx_configure_rx_ring(struct wx *wx, rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; + + if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) + rxdctl |= WX_PX_RR_CFG_DESC_MERGE; + wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); /* reset head and tail pointers */ @@ -1943,6 +1987,7 @@ static void wx_configure_rx_ring(struct wx *wx, ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); wx_configure_srrctl(wx, ring); + wx_configure_rscctl(wx, ring); /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, @@ -2181,7 +2226,9 @@ void wx_configure_rx(struct wx *wx) /* RSC Setup */ psrctl = rd32(wx, WX_PSR_CTL); psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ - psrctl |= WX_PSR_CTL_RSC_DIS; + psrctl &= ~WX_PSR_CTL_RSC_DIS; + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + psrctl |= WX_PSR_CTL_RSC_DIS; wr32(wx, WX_PSR_CTL, psrctl); } @@ -2190,6 +2237,12 @@ void wx_configure_rx(struct wx *wx) /* set_rx_buffer_len must be called before ring initialization */ wx_set_rx_buffer_len(wx); + if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) { + wr32(wx, WX_RDM_DCACHE_CTL, WX_RDM_DCACHE_CTL_EN); + wr32m(wx, WX_RDM_RSC_CTL, + WX_RDM_RSC_CTL_FREE_CTL | WX_RDM_RSC_CTL_FREE_CNT_DIS, + WX_RDM_RSC_CTL_FREE_CTL); + } /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -2806,6 +2859,18 @@ void wx_update_stats(struct wx *wx) wx->hw_csum_rx_error = hw_csum_rx_error; wx->hw_csum_rx_good = hw_csum_rx_good; + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + + for (i = 0; i < wx->num_rx_queues; i++) { + rsc_count += wx->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += wx->rx_ring[i]->rx_stats.rsc_flush; + } + wx->rsc_count = rsc_count; + wx->rsc_flush = rsc_flush; + } + for (i = 0; i < wx->num_tx_queues; i++) { struct wx_ring *tx_ring = wx->tx_ring[i]; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 3adf7048320a..32cadafa4b3b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -235,7 +235,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, { unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned 
int truesize = WX_RX_BUFSZ; + unsigned int truesize = wx_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif @@ -341,7 +341,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, - WX_RX_BUFSZ, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_desc->read.pkt_addr = @@ -404,6 +404,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { + struct wx *wx = rx_ring->q_vector->wx; u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ @@ -412,6 +413,24 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, prefetch(WX_RX_DESC(rx_ring, ntc)); + /* update RSC append count if present */ + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(WX_RXD_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= WX_RXD_RSCCNT_SHIFT; + WX_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= WX_RXD_NEXTP_MASK; + ntc >>= WX_RXD_NEXTP_SHIFT; + } + } + /* if we are the last buffer then there is nothing else to do */ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) return false; @@ -582,6 +601,33 @@ static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, } } +static void wx_set_rsc_gso_size(struct wx_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + WX_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void wx_update_rsc_stats(struct wx_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!WX_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += WX_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + wx_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + WX_CB(skb)->append_cnt = 0; +} + /** * wx_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -598,6 +644,9 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring, { struct wx *wx = netdev_priv(rx_ring->netdev); + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + wx_update_rsc_stats(rx_ring, skb); + wx_rx_hash(rx_ring, rx_desc, skb); wx_rx_checksum(rx_ring, rx_desc, skb); @@ -735,9 +784,22 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, /* prevent any other reads prior to eop_desc */ smp_rmb(); - /* if DD is not set pending work has not been completed */ - if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) + if (tx_ring->headwb_mem) { + u32 head = *tx_ring->headwb_mem; + + if (head == tx_ring->next_to_clean) + break; + else if (head > tx_ring->next_to_clean && + !(tx_buffer->next_eop >= tx_ring->next_to_clean && + tx_buffer->next_eop < head)) + break; + else if (!(tx_buffer->next_eop >= tx_ring->next_to_clean || + tx_buffer->next_eop < head)) + break; + } else if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) { + /* if DD is not set pending work has not been completed */ break; + } /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; @@ -1075,6 +1137,10 @@ static int wx_tx_map(struct 
wx_ring *tx_ring, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; + /* set next_eop for amlite tx head wb */ + if (tx_ring->headwb_mem) + first->next_eop = i; + i++; if (i == tx_ring->count) i = 0; @@ -2532,7 +2598,7 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - WX_RX_BUFSZ, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* free resources associated with mapping */ @@ -2683,6 +2749,16 @@ void wx_clean_all_tx_rings(struct wx *wx) } EXPORT_SYMBOL(wx_clean_all_tx_rings); +static void wx_free_headwb_resources(struct wx_ring *tx_ring) +{ + if (!tx_ring->headwb_mem) + return; + + dma_free_coherent(tx_ring->dev, sizeof(u32), + tx_ring->headwb_mem, tx_ring->headwb_dma); + tx_ring->headwb_mem = NULL; +} + /** * wx_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue @@ -2702,6 +2778,8 @@ static void wx_free_tx_resources(struct wx_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; + + wx_free_headwb_resources(tx_ring); } /** @@ -2731,13 +2809,14 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring) struct page_pool_params pp_params = { .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rx_ring->count, + .order = wx_rx_pg_order(rx_ring), + .pool_size = rx_ring->count * rx_ring->rx_buf_len / + wx_rx_pg_size(rx_ring), .nid = dev_to_node(rx_ring->dev), .dev = rx_ring->dev, .dma_dir = DMA_FROM_DEVICE, .offset = 0, - .max_len = PAGE_SIZE, + .max_len = wx_rx_pg_size(rx_ring), }; rx_ring->page_pool = page_pool_create(&pp_params); @@ -2840,6 +2919,24 @@ err_setup_rx: return err; } +static void wx_setup_headwb_resources(struct wx_ring *tx_ring) +{ + struct wx *wx = netdev_priv(tx_ring->netdev); + + if (!test_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags)) + return; + + if (!tx_ring->q_vector) + return; + + tx_ring->headwb_mem = dma_alloc_coherent(tx_ring->dev, + sizeof(u32), + &tx_ring->headwb_dma, + GFP_KERNEL); + if (!tx_ring->headwb_mem) + dev_info(tx_ring->dev, "Failed to allocate headwb memory, disabling head write-back\n"); +} + /** * wx_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -2880,6 +2977,8 @@ static int wx_setup_tx_resources(struct wx_ring *tx_ring) if (!tx_ring->desc) goto err; + wx_setup_headwb_resources(tx_ring); + tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -3026,8 +3125,25 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + if (!(features & NETIF_F_LRO)) { + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + need_reset = true; + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + } else if (!(test_bit(WX_FLAG_RSC_ENABLED, wx->flags))) { + if (wx->rx_itr_setting == 1 || + wx->rx_itr_setting > WX_MIN_RSC_ITR) { + set_bit(WX_FLAG_RSC_ENABLED, wx->flags); + need_reset = true; + } else if (changed & NETIF_F_LRO) { + dev_info(&wx->pdev->dev, + "rx-usecs set too low, disabling RSC\n"); + } + } + } + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) - return 0; + goto out; /* Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset.
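The LRO/RSC interplay in the wx_set_features() hunk above is the subtle part of this series: RSC stays enabled only while the device is RSC-capable, LRO is requested, and the interrupt moderation interval is either dynamic or long enough for coalescing to pay off, with a hardware reset whenever the state toggles. A standalone distillation of that decision in plain C, using hypothetical names rather than the libwx types (a sketch, not the driver's code):

#include <stdbool.h>

#define MIN_RSC_ITR 24	/* stand-in for WX_MIN_RSC_ITR */

struct rsc_state {
	bool capable;	/* mirrors WX_FLAG_RSC_CAPABLE */
	bool enabled;	/* mirrors WX_FLAG_RSC_ENABLED */
};

/* Returns true when the hardware must be reset to apply the change.
 * itr == 1 denotes dynamic interrupt moderation, which is always
 * compatible with RSC; otherwise the moderation setting must exceed
 * the minimum interval for coalescing to be worthwhile.
 */
static bool rsc_update(struct rsc_state *st, bool lro_on, unsigned int itr)
{
	bool want = st->capable && lro_on && (itr == 1 || itr > MIN_RSC_ITR);

	if (want == st->enabled)
		return false;	/* no change, no reset needed */

	st->enabled = want;
	return true;		/* toggled: reconfigure the hardware */
}

The same predicate is what wx_update_rsc() re-evaluates from wx_set_coalesce() in the wx_ethtool.c hunk earlier, so changing rx-usecs and toggling the LRO feature bit converge on a single RSC state.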
@@ -3053,6 +3169,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) break; } +out: if (need_reset && wx->do_reset) wx->do_reset(netdev); @@ -3102,6 +3219,14 @@ netdev_features_t wx_fix_features(struct net_device *netdev, } } + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + features &= ~NETIF_F_LRO; + return features; } EXPORT_SYMBOL(wx_fix_features); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c index c6d158cd70da..493da5fffdb6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c @@ -122,6 +122,10 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) WX_CFG_PORT_CTL_NUM_VT_MASK, value); + /* Disable RSC when in SR-IOV mode */ + clear_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + return ret; } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 2f8319e03182..b1a6ef5709a9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -83,8 +83,13 @@ /*********************** Receive DMA registers **************************/ #define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define WX_RDM_RSC_CTL 0x1200C +#define WX_RDM_RSC_CTL_FREE_CNT_DIS BIT(8) +#define WX_RDM_RSC_CTL_FREE_CTL BIT(7) #define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) #define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) +#define WX_RDM_DCACHE_CTL 0x120A8 +#define WX_RDM_DCACHE_CTL_EN BIT(0) #define WX_RDM_DRP_PKT 0x12500 #define WX_RDM_PKT_CNT 0x12504 #define WX_RDM_BYTE_CNT_LSB 0x12508 @@ -421,6 +426,7 @@ enum WX_MSCA_CMD_value { #define WX_7K_ITR 595 #define WX_12K_ITR 336 #define WX_20K_ITR 200 +#define WX_MIN_RSC_ITR 24 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_AML_MAX_EITR 0x00000FFFU #define WX_EM_MAX_EITR 0x00007FFCU @@ -431,12 +437,15 @@ enum WX_MSCA_CMD_value { #define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) #define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) #define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +#define WX_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40)) +#define WX_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40)) /* Transmit Config masks */ #define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ #define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ #define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. 
wr-bk flushing */ #define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ #define WX_PX_TR_CFG_THRE_SHIFT 8 +#define WX_PX_TR_CFG_HEAD_WB BIT(27) /* Receive DMA Registers */ #define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) @@ -448,7 +457,10 @@ enum WX_MSCA_CMD_value { /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_DROP_EN BIT(30) +#define WX_PX_RR_CFG_RSC BIT(29) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) +#define WX_PX_RR_CFG_MAX_RSCBUF_16 FIELD_PREP(GENMASK(24, 23), 3) +#define WX_PX_RR_CFG_DESC_MERGE BIT(19) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) #define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8) @@ -544,14 +556,9 @@ enum WX_MSCA_CMD_value { /* Supported Rx Buffer Sizes */ #define WX_RXBUFFER_256 256 /* Used for skb receive header */ #define WX_RXBUFFER_2K 2048 +#define WX_RXBUFFER_3K 3072 #define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */ -#if MAX_SKB_FRAGS < 8 -#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024) -#else -#define WX_RX_BUFSZ WX_RXBUFFER_2K -#endif - #define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define WX_MAX_DATA_PER_TXD BIT(14) @@ -643,6 +650,12 @@ enum wx_l2_ptypes { #define WX_RXD_PKTTYPE(_rxd) \ ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) + +#define WX_RXD_RSCCNT_MASK GENMASK(20, 17) +#define WX_RXD_RSCCNT_SHIFT 17 +#define WX_RXD_NEXTP_MASK GENMASK(19, 4) +#define WX_RXD_NEXTP_SHIFT 4 + /*********************** Transmit Descriptor Config Masks ****************/ #define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ #define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ @@ -1005,6 +1018,7 @@ struct wx_tx_buffer { DEFINE_DMA_UNMAP_LEN(len); __be16 protocol; u32 tx_flags; + u32 next_eop; }; struct wx_rx_buffer { @@ -1029,6 +1043,8 @@ struct wx_rx_queue_stats { u64 csum_good_cnt; u64 csum_err; u64 alloc_rx_buff_failed; + u64 rsc_count; + u64 rsc_flush; }; /* iterator for handling rings in ring container */ @@ -1056,6 +1072,8 @@ struct wx_ring { }; u8 __iomem *tail; dma_addr_t dma; /* phys. 
address of descriptor ring */ + dma_addr_t headwb_dma; + u32 *headwb_mem; unsigned int size; /* length in bytes */ u16 count; /* amount of descriptors */ @@ -1069,6 +1087,7 @@ struct wx_ring { */ u16 next_to_use; u16 next_to_clean; + u16 rx_buf_len; union { u16 next_to_alloc; struct { @@ -1225,6 +1244,7 @@ enum wx_pf_flags { WX_FLAG_FDIR_HASH, WX_FLAG_FDIR_PERFECT, WX_FLAG_RSC_CAPABLE, + WX_FLAG_RSC_ENABLED, WX_FLAG_RX_HWTSTAMP_ENABLED, WX_FLAG_RX_HWTSTAMP_IN_REGISTER, WX_FLAG_PTP_PPS_ENABLED, @@ -1232,6 +1252,8 @@ enum wx_pf_flags { WX_FLAG_NEED_SFP_RESET, WX_FLAG_NEED_UPDATE_LINK, WX_FLAG_NEED_DO_RESET, + WX_FLAG_RX_MERGE_ENABLED, + WX_FLAG_TXHEAD_WB_ENABLED, WX_PF_FLAGS_NBITS /* must be last */ }; @@ -1271,8 +1293,6 @@ struct wx { /* PHY stuff */ bool notify_down; - int adv_speed; - int adv_duplex; unsigned int link; int speed; int duplex; @@ -1340,6 +1360,8 @@ struct wx { u64 hw_csum_rx_good; u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + u64 rsc_count; + u64 rsc_flush; unsigned int num_vfs; struct vf_data_storage *vfinfo; struct vf_macvlans vf_mvs; @@ -1471,4 +1493,15 @@ static inline int wx_set_state_reset(struct wx *wx) return 0; } +static inline unsigned int wx_rx_pg_order(struct wx_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len == WX_RXBUFFER_3K) + return 1; +#endif + return 0; +} + +#define wx_rx_pg_size(_ring) (PAGE_SIZE << wx_rx_pg_order(_ring)) + #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h index 3f16de0fa427..eb6ca3fe4e97 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_vf.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h @@ -74,6 +74,7 @@ #define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f) #define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12) #define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f) +#define WX_VXRXDCTL_DESC_MERGE BIT(19) #define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23) #define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f) #define WX_VXRXDCTL_RSCEN BIT(29) @@ -91,6 +92,9 @@ #define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f) #define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f) #define WX_VXTXDCTL_FLUSH BIT(26) +#define WX_VXTXDCTL_HEAD_WB BIT(27) +#define WX_VXTXD_HEAD_ADDRL(r) (0x3028 + (0x40 * (r))) +#define WX_VXTXD_HEAD_ADDRH(r) (0x302C + (0x40 * (r))) #define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g) #define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c index a87887b9f8ee..aa8be036956c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c @@ -132,6 +132,15 @@ static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring) txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count)); txdctl |= WX_VXTXDCTL_ENABLE; + if (ring->headwb_mem) { + wr32(wx, WX_VXTXD_HEAD_ADDRL(reg_idx), + ring->headwb_dma & DMA_BIT_MASK(32)); + wr32(wx, WX_VXTXD_HEAD_ADDRH(reg_idx), + upper_32_bits(ring->headwb_dma)); + + txdctl |= WX_VXTXDCTL_HEAD_WB; + } + /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); @@ -272,6 +281,9 @@ void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring) rxdctl |= WX_VXRXDCTL_RSCMAX(0); rxdctl |= WX_VXRXDCTL_RSCEN; + if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) + rxdctl |= WX_VXRXDCTL_DESC_MERGE; + wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl); /* pf/vf reuse */ diff --git 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c index dc87ccad9652..35eebdb07761 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -19,8 +19,8 @@ void txgbe_gpio_init_aml(struct wx *wx) { u32 status; - wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); - wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2); + wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2); status = rd32(wx, WX_GPIO_INTSTATUS); for (int i = 0; i < 6; i++) { @@ -42,11 +42,6 @@ irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data) wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2); wx_service_event_schedule(wx); } - if (status & TXGBE_GPIOBIT_3) { - set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); - wx_service_event_schedule(wx); - wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3); - } wr32(wx, WX_GPIO_INTMASK, 0); return IRQ_HANDLED; @@ -96,6 +91,9 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int case SPEED_10000: buffer.speed = TXGBE_LINK_SPEED_10GB_FULL; break; + default: + buffer.speed = TXGBE_LINK_SPEED_UNKNOWN; + break; } buffer.fec_mode = TXGBE_PHY_FEC_AUTO; @@ -106,22 +104,21 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int WX_HI_COMMAND_TIMEOUT, true); } -static void txgbe_get_link_capabilities(struct wx *wx) +static void txgbe_get_link_capabilities(struct wx *wx, int *speed, int *duplex) { struct txgbe *txgbe = wx->priv; if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces)) - wx->adv_speed = SPEED_25000; + *speed = SPEED_25000; else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces)) - wx->adv_speed = SPEED_10000; + *speed = SPEED_10000; else - wx->adv_speed = SPEED_UNKNOWN; + *speed = SPEED_UNKNOWN; - wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ? - DUPLEX_HALF : DUPLEX_FULL; + *duplex = *speed == SPEED_UNKNOWN ? DUPLEX_HALF : DUPLEX_FULL; } -static void txgbe_get_phy_link(struct wx *wx, int *speed) +static void txgbe_get_mac_link(struct wx *wx, int *speed) { u32 status; @@ -138,23 +135,11 @@ static void txgbe_get_phy_link(struct wx *wx, int *speed) int txgbe_set_phy_link(struct wx *wx) { - int speed, err; - u32 gpio; - - /* Check RX signal */ - gpio = rd32(wx, WX_GPIO_EXT); - if (gpio & TXGBE_GPIOBIT_3) - return -ENODEV; + int speed, duplex, err; - txgbe_get_link_capabilities(wx); - if (wx->adv_speed == SPEED_UNKNOWN) - return -ENODEV; - - txgbe_get_phy_link(wx, &speed); - if (speed == wx->adv_speed) - return 0; + txgbe_get_link_capabilities(wx, &speed, &duplex); - err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex); + err = txgbe_set_phy_link_hostif(wx, speed, 0, duplex); if (err) { wx_err(wx, "Failed to setup link\n"); return err; @@ -230,14 +215,7 @@ int txgbe_identify_sfp(struct wx *wx) return -ENODEV; } - err = txgbe_sfp_to_linkmodes(wx, id); - if (err) - return err; - - if (gpio & TXGBE_GPIOBIT_3) - set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); - - return 0; + return txgbe_sfp_to_linkmodes(wx, id); } void txgbe_setup_link(struct wx *wx) @@ -256,7 +234,7 @@ static void txgbe_get_link_state(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); int speed; - txgbe_get_phy_link(wx, &speed); + txgbe_get_mac_link(wx, &speed); state->link = speed != SPEED_UNKNOWN; state->speed = speed; state->duplex = state->link ? 
DUPLEX_FULL : DUPLEX_UNKNOWN; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index c4c4d70d8466..daa761e48f9d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -398,6 +398,7 @@ static int txgbe_sw_init(struct wx *wx) wx->configure_fdir = txgbe_configure_fdir; set_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + set_bit(WX_FLAG_RSC_ENABLED, wx->flags); set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags); /* enable itr by default in dynamic mode */ @@ -423,6 +424,8 @@ static int txgbe_sw_init(struct wx *wx) break; case wx_mac_aml: case wx_mac_aml40: + set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags); + set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags); set_bit(WX_FLAG_SWFW_RING, wx->flags); wx->swfw_index = 0; break; @@ -801,6 +804,8 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; netdev->hw_features |= NETIF_F_GRO; netdev->features |= NETIF_F_GRO; + netdev->hw_features |= NETIF_F_LRO; + netdev->features |= NETIF_F_LRO; netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; netdev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 41915d7dd372..b9a4ba48f5b9 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -314,6 +314,7 @@ void txgbe_up(struct wx *wx); int txgbe_setup_tc(struct net_device *dev, u8 tc); void txgbe_do_reset(struct net_device *netdev); +#define TXGBE_LINK_SPEED_UNKNOWN 0 #define TXGBE_LINK_SPEED_10GB_FULL 4 #define TXGBE_LINK_SPEED_25GB_FULL 0x10 @@ -352,7 +353,9 @@ struct txgbe_sfp_id { u8 vendor_oui0; /* A0H 0x25 */ u8 vendor_oui1; /* A0H 0x26 */ u8 vendor_oui2; /* A0H 0x27 */ - u8 reserved[3]; + u8 transceiver_type; /* A0H 0x83 */ + u8 sff_opt1; /* A0H 0xC0 */ + u8 reserved[5]; }; struct txgbe_hic_i2c_read { diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c index 72663e3c4205..37e4ec487afd 100644 --- a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c @@ -157,6 +157,18 @@ static int txgbevf_sw_init(struct wx *wx) wx->set_num_queues = txgbevf_set_num_queues; + switch (wx->mac.type) { + case wx_mac_sp: + break; + case wx_mac_aml: + case wx_mac_aml40: + set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags); + set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags); + break; + default: + break; + } + return 0; err_reset_hw: kfree(wx->vfinfo); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 5cb59d72bc82..4213c3b2d532 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -633,7 +633,7 @@ static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type) hdr->tid = 0; /* seq, npdu and next should be counted to the length of the GTP packet - * that's why szie of gtp1_header should be subtracted, + * that's why size of gtp1_header should be subtracted, * not size of gtp1_header_long. 
*/ diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index 245a06997055..8336596b1247 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -149,7 +149,6 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id) iowrite32(pending, ipa->reg_virt + reg_offset(reg)); } out_power_put: - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); return IRQ_HANDLED; diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 25500c5a6928..95a61bae3124 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -903,7 +903,6 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_deconfig; done: - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); return 0; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index 8fe0d0e1a00f..9b136f6b8b4a 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -71,7 +71,6 @@ static int ipa_open(struct net_device *netdev) netif_start_queue(netdev); - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); return 0; @@ -102,7 +101,6 @@ static int ipa_stop(struct net_device *netdev) ipa_endpoint_disable_one(priv->rx); ipa_endpoint_disable_one(priv->tx); out_power_put: - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); return 0; @@ -175,7 +173,6 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) ret = ipa_endpoint_skb_tx(endpoint, skb); - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); if (ret) { @@ -432,7 +429,6 @@ static void ipa_modem_crashed(struct ipa *ipa) dev_err(dev, "error %d zeroing modem memory regions\n", ret); out_power_put: - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); } diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index fcaadd111a8a..420098796eec 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -171,7 +171,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) WARN(ret != 0, "error %d from ipa_setup()\n", ret); out_power_put: - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); return IRQ_HANDLED; @@ -213,7 +212,6 @@ static void ipa_smp2p_power_release(struct ipa *ipa) if (!ipa->smp2p->power_on) return; - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); ipa->smp2p->power_on = false; } diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index 2963db83ab6b..dc7e92f2a4fb 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -158,7 +158,6 @@ static void ipa_uc_response_hdlr(struct ipa *ipa) if (ipa->uc_powered) { ipa->uc_loaded = true; ipa_power_retention(ipa, true); - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); ipa->uc_powered = false; } else { @@ -203,7 +202,6 @@ void ipa_uc_deconfig(struct ipa *ipa) if (!ipa->uc_powered) return; - pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 47cdee5577d4..36a1be4923d6 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -277,6 +277,7 @@ void nsim_ipsec_init(struct netdevsim *ns) NETIF_F_GSO_ESP) ns->netdev->features |= NSIM_ESP_FEATURES; + ns->netdev->hw_features |= NSIM_ESP_FEATURES; ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, diff --git 
a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 02c1c97b7008..af6fcfcda8ba 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -109,6 +109,11 @@ struct netdevsim { int rq_reset_mode; struct { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; struct psp_dev *dev; u32 spi; u32 assoc_cnt; diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c index 332b5b744f01..727da06101ca 100644 --- a/drivers/net/netdevsim/psp.c +++ b/drivers/net/netdevsim/psp.c @@ -70,6 +70,13 @@ nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns, *psp_ext = skb->extensions; refcount_inc(&(*psp_ext)->refcnt); skb->decrypted = 1; + + u64_stats_update_begin(&ns->psp.syncp); + ns->psp.tx_packets++; + ns->psp.rx_packets++; + ns->psp.tx_bytes += skb->len - skb_inner_transport_offset(skb); + ns->psp.rx_bytes += skb->len - skb_inner_transport_offset(skb); + u64_stats_update_end(&ns->psp.syncp); } else { struct ipv6hdr *ip6h __maybe_unused; struct iphdr *iph; @@ -164,12 +171,32 @@ static void nsim_assoc_del(struct psp_dev *psd, struct psp_assoc *pas) ns->psp.assoc_cnt--; } +static void nsim_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats) +{ + struct netdevsim *ns = psd->drv_priv; + unsigned int start; + + /* WARNING: do *not* blindly zero stats in real drivers! + * All required stats must be reported by the device! + */ + memset(stats, 0, sizeof(struct psp_dev_stats)); + + do { + start = u64_stats_fetch_begin(&ns->psp.syncp); + stats->rx_bytes = ns->psp.rx_bytes; + stats->rx_packets = ns->psp.rx_packets; + stats->tx_bytes = ns->psp.tx_bytes; + stats->tx_packets = ns->psp.tx_packets; + } while (u64_stats_fetch_retry(&ns->psp.syncp, start)); +} + static struct psp_dev_ops nsim_psp_ops = { .set_config = nsim_psp_set_config, .rx_spi_alloc = nsim_rx_spi_alloc, .tx_key_add = nsim_assoc_add, .tx_key_del = nsim_assoc_del, .key_rotate = nsim_key_rotate, + .get_stats = nsim_get_stats, }; static struct psp_dev_caps nsim_psp_caps = { diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 492be60f2e70..0a2fef7caccb 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -16,17 +16,19 @@ #define DRV_NAME "netkit" struct netkit { - /* Needed in fast-path */ + __cacheline_group_begin(netkit_fastpath); struct net_device __rcu *peer; struct bpf_mprog_entry __rcu *active; enum netkit_action policy; enum netkit_scrub scrub; struct bpf_mprog_bundle bundle; + __cacheline_group_end(netkit_fastpath); - /* Needed in slow-path */ + __cacheline_group_begin(netkit_slowpath); enum netkit_mode mode; bool primary; u32 headroom; + __cacheline_group_end(netkit_slowpath); }; struct netkit_link { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 98700d069191..a7ade7b95a2e 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -308,7 +308,7 @@ config MICREL_PHY config MICROCHIP_T1S_PHY tristate "Microchip 10BASE-T1S Ethernet PHYs" help - Currently supports the LAN8670/1/2 Rev.B1/C1/C2 and + Currently supports the LAN8670/1/2 Rev.B1/C1/C2/D0 and LAN8650/1 Rev.B0/B1 Internal PHYs. 
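Stepping back to the netdevsim PSP statistics hunks above: the datapath bumps plain u64 counters between u64_stats_update_begin()/u64_stats_update_end(), and nsim_get_stats() snapshots them inside a fetch_begin/fetch_retry loop. On 64-bit builds the sync object compiles away entirely; on 32-bit it is a seqcount that lets readers detect a torn snapshot and retry rather than take a lock. The consumer shape, with hypothetical names (a sketch of the pattern, not an API added by this series):

#include <linux/u64_stats_sync.h>

/* Hypothetical counter holder, mirroring the shape netdevsim uses. */
struct foo_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
};

/* Reader side: loop until a consistent snapshot is observed. */
static void foo_read_stats(struct foo_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}

The writer side must still be serialized externally (here, by the transmit path); begin/retry only protects the readers.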
config MICROCHIP_PHY diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index 23af1ac194fa..d75dae6071ad 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -61,6 +61,7 @@ #define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15) #define DP83TD510E_MSE_DETECT 0xa85 +#define DP83TD510E_MSE_MAX U16_MAX #define DP83TD510_SQI_MAX 7 @@ -249,6 +250,64 @@ struct dp83td510_priv { #define DP83TD510E_ALCD_COMPLETE BIT(15) #define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0) +static int dp83td510_get_mse_capability(struct phy_device *phydev, + struct phy_mse_capability *cap) +{ + /* DP83TD510E documents only a single (average) MSE register + * (used to derive SQI); no peak or worst-peak counters are + * described. Advertise only PHY_MSE_CAP_AVG. + */ + cap->supported_caps = PHY_MSE_CAP_AVG; + /* 10BASE-T1L is a single-pair medium, so there are no B/C/D channels. + * We still advertise PHY_MSE_CAP_CHANNEL_A to indicate that the PHY + * can attribute the measurement to a specific pair (the only one), + * rather than exposing it only as a link-aggregate. + * + * Rationale: + * - Keeps the ethtool MSE_GET selection logic consistent: per-channel + * (A/B/C/D) is preferred over WORST/LINK, so userspace receives a + * CHANNEL_A nest instead of LINK. + * - Signals to tools that "per-pair" data is available (even if there's + * just one pair), avoiding the impression that only aggregate values + * are supported. + * - Remains compatible with multi-pair PHYs and uniform UI handling. + * + * Note: WORST and other channels are not advertised on 10BASE-T1L. + */ + cap->supported_caps |= PHY_MSE_CHANNEL_A | PHY_MSE_CAP_LINK; + cap->max_average_mse = DP83TD510E_MSE_MAX; + + /* The datasheet does not specify the refresh rate or symbol count, + * but based on similar PHYs and standards, we can assume a common + * value. For 10BASE-T1L, the symbol rate is 7.5 MBd. A common + * diagnostic interval is around 1ms. + * 7.5e6 symbols/sec * 0.001 sec = 7500 symbols. 
+ */ + cap->refresh_rate_ps = 1000000000; /* 1 ms */ + cap->num_symbols = 7500; + + return 0; +} + +static int dp83td510_get_mse_snapshot(struct phy_device *phydev, + enum phy_mse_channel channel, + struct phy_mse_snapshot *snapshot) +{ + int ret; + + if (channel != PHY_MSE_CHANNEL_LINK && + channel != PHY_MSE_CHANNEL_A) + return -EOPNOTSUPP; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT); + if (ret < 0) + return ret; + + snapshot->average_mse = ret; + + return 0; +} + static int dp83td510_led_brightness_set(struct phy_device *phydev, u8 index, enum led_brightness brightness) { @@ -893,6 +952,9 @@ static struct phy_driver dp83td510_driver[] = { .get_phy_stats = dp83td510_get_phy_stats, .update_stats = dp83td510_update_stats, + .get_mse_capability = dp83td510_get_mse_capability, + .get_mse_snapshot = dp83td510_get_mse_snapshot, + .led_brightness_set = dp83td510_led_brightness_set, .led_hw_is_supported = dp83td510_led_hw_is_supported, .led_hw_control_set = dp83td510_led_hw_control_set, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 0e1b28f06f18..715f0356f895 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -131,12 +131,6 @@ static int __fixed_phy_add(int phy_addr, return 0; } -void fixed_phy_add(const struct fixed_phy_status *status) -{ - __fixed_phy_add(0, status); -} -EXPORT_SYMBOL_GPL(fixed_phy_add); - static DEFINE_IDA(phy_fixed_ida); static void fixed_phy_del(int phy_addr) @@ -180,13 +174,11 @@ struct phy_device *fixed_phy_register(const struct fixed_phy_status *status, } /* propagate the fixed link values to struct phy_device */ - phy->link = status->link; - if (status->link) { - phy->speed = status->speed; - phy->duplex = status->duplex; - phy->pause = status->pause; - phy->asym_pause = status->asym_pause; - } + phy->link = 1; + phy->speed = status->speed; + phy->duplex = status->duplex; + phy->pause = status->pause; + phy->asym_pause = status->asym_pause; of_node_get(np); phy->mdio.dev.of_node = np; @@ -227,6 +219,17 @@ struct phy_device *fixed_phy_register(const struct fixed_phy_status *status, } EXPORT_SYMBOL_GPL(fixed_phy_register); +struct phy_device *fixed_phy_register_100fd(void) +{ + static const struct fixed_phy_status status = { + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + }; + + return fixed_phy_register(&status, NULL); +} +EXPORT_SYMBOL_GPL(fixed_phy_register_100fd); + void fixed_phy_unregister(struct phy_device *phy) { phy_device_remove(phy); diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h index 931e14660d75..6850a3f0b31e 100644 --- a/drivers/net/phy/mdio-open-alliance.h +++ b/drivers/net/phy/mdio-open-alliance.h @@ -43,4 +43,40 @@ /* Version Identifiers */ #define OATC14_IDM 0x0a00 +/* + * Open Alliance TC14 (10BASE-T1S) - Advanced Diagnostic Features Registers + * + * Refer to the OPEN Alliance documentation: + * https://opensig.org/automotive-ethernet-specifications/ + * + * Specification: + * "10BASE-T1S Advanced Diagnostic PHY Features" + * https://opensig.org/wp-content/uploads/2025/06/OPEN_Alliance_10BASE-T1S_Advanced_PHY_features_for-automotive_Ethernet_V2.1b.pdf + */ +/* Advanced Diagnostic Features Capability Register */ +#define MDIO_OATC14_ADFCAP 0xcc00 +#define OATC14_ADFCAP_HDD_CAPABILITY GENMASK(10, 8) + +/* Harness Defect Detection Register */ +#define MDIO_OATC14_HDD 0xcc01 +#define OATC14_HDD_CONTROL BIT(15) +#define OATC14_HDD_READY BIT(14) +#define OATC14_HDD_START_CONTROL BIT(13) +#define OATC14_HDD_VALID BIT(2) +#define
OATC14_HDD_SHORT_OPEN_STATUS GENMASK(1, 0) + +/* Bus Short/Open Status: + * 0 0 - no fault; everything is ok. (Default) + * 0 1 - detected as an open or missing termination(s) + * 1 0 - detected as a short or extra termination(s) + * 1 1 - fault but fault type not detectable. More details may be available via + * a vendor specific register if supported. + */ +enum oatc14_hdd_status { + OATC14_HDD_STATUS_CABLE_OK = 0, + OATC14_HDD_STATUS_OPEN, + OATC14_HDD_STATUS_SHORT, + OATC14_HDD_STATUS_NOT_DETECTABLE, +}; + #endif /* __MDIO_OPEN_ALLIANCE__ */ diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c index a2391d4b7e5c..4b0637405740 100644 --- a/drivers/net/phy/mdio_bus_provider.c +++ b/drivers/net/phy/mdio_bus_provider.c @@ -249,20 +249,15 @@ static int mdiobus_scan_bus_c45(struct mii_bus *bus) */ static bool mdiobus_prevent_c45_scan(struct mii_bus *bus) { - int i; + struct phy_device *phydev; - for (i = 0; i < PHY_MAX_ADDR; i++) { - struct phy_device *phydev; - u32 oui; - - phydev = mdiobus_get_phy(bus, i); - if (!phydev) - continue; - oui = phydev->phy_id >> 10; + mdiobus_for_each_phy(bus, phydev) { + u32 oui = phydev->phy_id >> 10; if (oui == MICREL_OUI) return true; } + return false; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 01c87c9b7702..57ea947369fe 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -101,6 +101,8 @@ #define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0) #define LAN8814_PAIR_BIT_SHIFT 12 +#define LAN8814_SKUS 0xB + #define LAN8814_WIRE_PAIR_MASK 0xF /* Lan8814 general Interrupt control/status reg in GPHY specific block. */ @@ -367,6 +369,9 @@ #define LAN8842_REV_8832 0x8832 +#define LAN8814_REV_LAN8814 0x8814 +#define LAN8814_REV_LAN8818 0x8818 + struct kszphy_hw_stat { const char *string; u8 reg; @@ -449,6 +454,7 @@ struct kszphy_priv { bool rmii_ref_clk_sel; bool rmii_ref_clk_sel_val; bool clk_enable; + bool is_ptp_available; u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; struct kszphy_phy_stats phy_stats; }; @@ -1056,7 +1062,7 @@ static int ksz9021_config_init(struct phy_device *phydev) #define TX_CLK_ID 0x1f /* set tx and tx_clk to "No delay adjustment" to keep 0ns - * dealy + * delay */ #define TX_ND 0x7 #define TX_CLK_ND 0xf @@ -1919,7 +1925,7 @@ static int ksz886x_config_aneg(struct phy_device *phydev) return ret; if (phydev->autoneg != AUTONEG_ENABLE) { - /* When autonegotation is disabled, we need to manually force + /* When autonegotiation is disabled, we need to manually force * the link state. If we don't do this, the PHY will keep * sending Fast Link Pulses (FLPs) which are part of the * autonegotiation process. This is not desired when @@ -2101,11 +2107,7 @@ static int ksz9477_phy_errata(struct phy_device *phydev) return err; } - err = genphy_restart_aneg(phydev); - if (err) - return err; - - return err; + return genphy_restart_aneg(phydev); } static int ksz9477_config_init(struct phy_device *phydev) @@ -2329,6 +2331,106 @@ static int kszphy_get_sqi_max(struct phy_device *phydev) return KSZ9477_SQI_MAX; } +static int kszphy_get_mse_capability(struct phy_device *phydev, + struct phy_mse_capability *cap) +{ + /* Capabilities depend on link mode: + * - 1000BASE-T: per-pair SQI registers exist => expose A..D + * and a WORST selector. + * - 100BASE-TX: HW provides a single MSE/SQI reading in the "channel A" + * register, but with auto MDI-X there is no MDI-X resolution bit, + * so we cannot map that register to a specific wire pair reliably.
+ To avoid misleading per-channel data, advertise only LINK. + * Other speeds: no MSE exposure via this driver. + * + * Note: WORST is *not* a hardware selector on this family. + * We expose it because the driver computes it in software + * by scanning per-channel readouts (A..D) and picking the + * maximum average MSE. + */ + if (phydev->speed == SPEED_1000) + cap->supported_caps = PHY_MSE_CAP_CHANNEL_A | + PHY_MSE_CAP_CHANNEL_B | + PHY_MSE_CAP_CHANNEL_C | + PHY_MSE_CAP_CHANNEL_D | + PHY_MSE_CAP_WORST_CHANNEL; + else if (phydev->speed == SPEED_100) + cap->supported_caps = PHY_MSE_CAP_LINK; + else + return -EOPNOTSUPP; + + cap->max_average_mse = FIELD_MAX(KSZ9477_MMD_SQI_MASK); + cap->refresh_rate_ps = 2000000; /* 2 us */ + /* Estimated from link modulation (125 MBd per channel) and documented + * refresh rate of 2 us + */ + cap->num_symbols = 250; + + cap->supported_caps |= PHY_MSE_CAP_AVG; + + return 0; +} + +static int kszphy_get_mse_snapshot(struct phy_device *phydev, + enum phy_mse_channel channel, + struct phy_mse_snapshot *snapshot) +{ + u8 num_channels; + int ret; + + if (phydev->speed == SPEED_1000) + num_channels = 4; + else if (phydev->speed == SPEED_100) + num_channels = 1; + else + return -EOPNOTSUPP; + + if (channel == PHY_MSE_CHANNEL_WORST) { + u32 worst_val = 0; + int i; + + /* WORST is implemented in software: select the maximum + * average MSE across the available per-channel registers. + * Only defined when multiple channels exist (1000BASE-T). + */ + if (num_channels < 2) + return -EOPNOTSUPP; + + for (i = 0; i < num_channels; i++) { + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A + i); + if (ret < 0) + return ret; + + ret = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret); + if (ret > worst_val) + worst_val = ret; + } + snapshot->average_mse = worst_val; + } else if (channel == PHY_MSE_CHANNEL_LINK && num_channels == 1) { + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A); + if (ret < 0) + return ret; + snapshot->average_mse = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret); + } else if (channel >= PHY_MSE_CHANNEL_A && + channel <= PHY_MSE_CHANNEL_D) { + /* Per-channel readouts are valid only for 1000BASE-T. */ + if (phydev->speed != SPEED_1000) + return -EOPNOTSUPP; + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A + channel); + if (ret < 0) + return ret; + snapshot->average_mse = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret); + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static void kszphy_enable_clk(struct phy_device *phydev) { struct kszphy_priv *priv = phydev->priv; @@ -3557,7 +3659,7 @@ static void lan8814_ptp_disable_event(struct phy_device *phydev, int event) /* Set target to too far in the future, effectively disabling it */ lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0); - /* And then reload once it recheas the target */ + /* And then reload once it reaches the target */ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG, LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event), LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event)); @@ -4150,6 +4252,17 @@ static int lan8804_config_intr(struct phy_device *phydev) return 0; } +/* Check if the PHY has 1588 support. There are multiple skus of the PHY and + * some of them support PTP while others don't support it. This function will + * return true if the sku supports it, otherwise will return false.
+ */ +static bool lan8814_has_ptp(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + + return priv->is_ptp_available; +} + static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { int ret = IRQ_NONE; @@ -4166,6 +4279,9 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) ret = IRQ_HANDLED; } + if (!lan8814_has_ptp(phydev)) + return ret; + while (true) { irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS); @@ -4227,6 +4343,9 @@ static void lan8814_ptp_init(struct phy_device *phydev) !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) return; + if (!lan8814_has_ptp(phydev)) + return; + lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, TSU_HARD_RESET, TSU_HARD_RESET_); @@ -4356,6 +4475,9 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name, static int lan8814_ptp_probe_once(struct phy_device *phydev) { + if (!lan8814_has_ptp(phydev)) + return 0; + return __lan8814_ptp_probe_once(phydev, "lan8814_ptp_pin", LAN8814_PTP_GPIO_NUM); } @@ -4421,7 +4543,7 @@ static int lan8814_release_coma_mode(struct phy_device *phydev) static void lan8814_clear_2psp_bit(struct phy_device *phydev) { /* It was noticed that when traffic is passing through the PHY and the - * cable is removed then the LED was still one even though there is no + * cable is removed then the LED was still on even though there is no * link */ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_EEE_STATE, @@ -4464,6 +4586,18 @@ static int lan8814_probe(struct phy_device *phydev) devm_phy_package_join(&phydev->mdio.dev, phydev, addr, sizeof(struct lan8814_shared_priv)); + /* There are LAN8814 SKUs that don't support PTP. Make sure that no PTP + * device is created for those SKUs. Here we check whether the SKU + * supports PTP.
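+ * For example, parts reporting revision code 0x8814 (LAN8814) or 0x8818 + * (LAN8818) are PTP capable; any other value leaves is_ptp_available false and + * the PTP interrupt, init and probe paths above become no-ops.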
+ */ + err = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, + LAN8814_SKUS); + if (err < 0) + return err; + + priv->is_ptp_available = err == LAN8814_REV_LAN8814 || + err == LAN8814_REV_LAN8818; + if (phy_package_init_once(phydev)) { /* Reset the PHY */ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, @@ -4567,7 +4701,7 @@ static int lan8841_config_init(struct phy_device *phydev) phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG, LAN8841_PTP_TX_VERSION, 0xff00); - /* 100BT Clause 40 improvenent errata */ + /* 100BT Clause 40 improvement errata */ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG, LAN8841_ANALOG_CONTROL_1, LAN8841_ANALOG_CONTROL_1_PLL_TRIM(0x2)); @@ -5587,7 +5721,7 @@ static int lan8841_ptp_extts_on(struct kszphy_ptp_priv *ptp_priv, int pin, u16 tmp = 0; int ret; - /* Set GPIO to be intput */ + /* Set GPIO to be input */ ret = phy_set_bits_mmd(phydev, 2, LAN8841_GPIO_EN, BIT(pin)); if (ret) return ret; @@ -6626,6 +6760,8 @@ static struct phy_driver ksphy_driver[] = { .cable_test_get_status = ksz9x31_cable_test_get_status, .get_sqi = kszphy_get_sqi, .get_sqi_max = kszphy_get_sqi_max, + .get_mse_capability = kszphy_get_mse_capability, + .get_mse_snapshot = kszphy_get_mse_snapshot, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c index e50a0c102a86..5a0a66778977 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -3,7 +3,7 @@ * Driver for Microchip 10BASE-T1S PHYs * * Support: Microchip Phys: - * lan8670/1/2 Rev.B1/C1/C2 + * lan8670/1/2 Rev.B1/C1/C2/D0 * lan8650/1 Rev.B0/B1 Internal PHYs */ @@ -14,6 +14,7 @@ #define PHY_ID_LAN867X_REVB1 0x0007C162 #define PHY_ID_LAN867X_REVC1 0x0007C164 #define PHY_ID_LAN867X_REVC2 0x0007C165 +#define PHY_ID_LAN867X_REVD0 0x0007C166 /* Both Rev.B0 and B1 clause 22 PHYID's are same due to B1 chip limitation */ #define PHY_ID_LAN865X_REVB 0x0007C1B3 @@ -32,6 +33,17 @@ #define COL_DET_ENABLE BIT(15) #define COL_DET_DISABLE 0x0000 +/* LAN8670/1/2 Rev.D0 Link Status Selection Register */ +#define LAN867X_REG_LINK_STATUS_CTRL 0x0012 +#define LINK_STATUS_CONFIGURATION GENMASK(12, 11) +#define LINK_STATUS_SEMAPHORE BIT(0) + +/* Link Status Configuration */ +#define LINK_STATUS_CONFIG_PLCA_STATUS 0x1 +#define LINK_STATUS_CONFIG_SEMAPHORE 0x2 + +#define LINK_STATUS_SEMAPHORE_SET 0x1 + #define LAN865X_CFGPARAM_READ_ENABLE BIT(1) /* The arrays below are pulled from the following table from AN1699 @@ -109,6 +121,21 @@ static const u16 lan865x_revb_sqi_fixup_cfg_regs[3] = { 0x00AD, 0x00AE, 0x00AF, }; +/* LAN867x Rev.D0 configuration parameters from AN1699 + * As per the Configuration Application Note AN1699 published at the link below, + * https://www.microchip.com/en-us/application-notes/an1699 + * Revision G (DS60001699G - October 2025) + */ +static const u16 lan867x_revd0_fixup_regs[8] = { + 0x0037, 0x008A, 0x0118, 0x00D6, + 0x0082, 0x00FD, 0x00FD, 0x0091, +}; + +static const u16 lan867x_revd0_fixup_values[8] = { + 0x0800, 0xBFC0, 0x029C, 0x1001, + 0x001C, 0x0C0B, 0x8C07, 0x9660, +}; + /* Pulled from AN1760 describing 'indirect read' * * write_register(0x4, 0x00D8, addr) @@ -377,6 +404,32 @@ static int lan867x_revb1_config_init(struct phy_device *phydev) return 0; } +static int lan867x_revd0_link_active_selection(struct phy_device *phydev, + bool plca_enabled) +{ + u16 value; + + if (plca_enabled) { + /* 0x1 - When PLCA is enabled: link status reflects plca_status.
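+ * (With this selection the reported link state follows the PLCA + * status bit rather than the semaphore configured in the else + * branch below.)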
+ */ + value = FIELD_PREP(LINK_STATUS_CONFIGURATION, + LINK_STATUS_CONFIG_PLCA_STATUS); + } else { + /* 0x2 - Link status is controlled by the value written to the + * LINK_STATUS_SEMAPHORE bit. Here the semaphore bit is + * written with 0x1 to keep the link always active in + * CSMA/CD mode, which doesn't support autoneg. + */ + value = FIELD_PREP(LINK_STATUS_CONFIGURATION, + LINK_STATUS_CONFIG_SEMAPHORE) | + FIELD_PREP(LINK_STATUS_SEMAPHORE, + LINK_STATUS_SEMAPHORE_SET); + } + + return phy_write_mmd(phydev, MDIO_MMD_VEND2, + LAN867X_REG_LINK_STATUS_CTRL, value); +} + /* As per LAN8650/1 Rev.B0/B1 AN1760 (Revision F (DS60001760G - June 2024)) and * LAN8670/1/2 Rev.C1/C2 AN1699 (Revision E (DS60001699F - June 2024)), under * normal operation, the device should be operated in PLCA mode. Disabling @@ -393,6 +446,14 @@ static int lan86xx_plca_set_cfg(struct phy_device *phydev, { int ret; + /* Link status selection must be configured for LAN8670/1/2 Rev.D0 */ + if (phydev->phy_id == PHY_ID_LAN867X_REVD0) { + ret = lan867x_revd0_link_active_selection(phydev, + plca_cfg->enabled); + if (ret) + return ret; + } + ret = genphy_c45_plca_set_cfg(phydev, plca_cfg); if (ret) return ret; @@ -407,6 +468,29 @@ static int lan86xx_plca_set_cfg(struct phy_device *phydev, COL_DET_CTRL0_ENABLE_BIT_MASK, COL_DET_ENABLE); } +static int lan867x_revd0_config_init(struct phy_device *phydev) +{ + int ret; + + ret = lan867x_check_reset_complete(phydev); + if (ret) + return ret; + + for (int i = 0; i < ARRAY_SIZE(lan867x_revd0_fixup_regs); i++) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + lan867x_revd0_fixup_regs[i], + lan867x_revd0_fixup_values[i]); + if (ret) + return ret; + } + + /* The PHY is in CSMA/CD mode by default, so it is required to set the + * link always active, as CSMA/CD doesn't support + * autoneg.
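+ * When PLCA is enabled later via lan86xx_plca_set_cfg(), + * lan867x_revd0_link_active_selection(phydev, true) switches the + * selection so that link status tracks plca_status instead.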
+ */ + return lan867x_revd0_link_active_selection(phydev, false); +} + static int lan86xx_read_status(struct phy_device *phydev) { /* The phy has some limitations, namely: @@ -482,6 +566,17 @@ static struct phy_driver microchip_t1s_driver[] = { .get_plca_status = genphy_c45_plca_get_status, }, { + PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVD0), + .name = "LAN867X Rev.D0", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = lan867x_revd0_config_init, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = lan86xx_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + .cable_test_start = genphy_c45_oatc14_cable_test_start, + .cable_test_get_status = genphy_c45_oatc14_cable_test_get_status, + }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB), .name = "LAN865X Rev.B0/B1 Internal Phy", .features = PHY_BASIC_T1S_P2MP_FEATURES, @@ -501,6 +596,7 @@ static const struct mdio_device_id __maybe_unused tbl[] = { { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) }, { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) }, { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) }, + { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVD0) }, { PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB) }, { } }; diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index a3593e663059..89b5b19a9bd2 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -3048,6 +3048,9 @@ static struct phy_driver motorcomm_phy_drvs[] = { .get_wol = ytphy_get_wol, .set_wol = yt8531_set_wol, .link_change_notify = yt8531_link_change_notify, + .led_hw_is_supported = yt8521_led_hw_is_supported, + .led_hw_control_set = yt8521_led_hw_control_set, + .led_hw_control_get = yt8521_led_hw_control_get, }, { PHY_ID_MATCH_EXACT(PHY_ID_YT8531S), diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index 2d8eca54c40a..2eef5956b9cc 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -289,12 +289,12 @@ enum rgmii_clock_delay { #define PHY_ID_VSC8540 0x00070760 #define PHY_ID_VSC8541 0x00070770 #define PHY_ID_VSC8552 0x000704e0 -#define PHY_ID_VSC856X 0x000707e0 +#define PHY_ID_VSC856X 0x000707e1 #define PHY_ID_VSC8572 0x000704d0 #define PHY_ID_VSC8574 0x000704a0 -#define PHY_ID_VSC8575 0x000707d0 -#define PHY_ID_VSC8582 0x000707b0 -#define PHY_ID_VSC8584 0x000707c0 +#define PHY_ID_VSC8575 0x000707d1 +#define PHY_ID_VSC8582 0x000707b1 +#define PHY_ID_VSC8584 0x000707c1 #define PHY_VENDOR_MSCC 0x00070400 #define MSCC_VDDMAC_1500 1500 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index ef0ef1570d39..8678ebf89cca 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1724,12 +1724,6 @@ static int vsc8584_config_init(struct phy_device *phydev) * in this pre-init function. */ if (phy_package_init_once(phydev)) { - /* The following switch statement assumes that the lowest - * nibble of the phy_id_mask is always 0. This works because - * the lowest nibble of the PHY_ID's below are also 0. 
- */ - WARN_ON(phydev->drv->phy_id_mask & 0xf); - switch (phydev->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_VSC8504: case PHY_ID_VSC8552: @@ -2290,11 +2284,6 @@ static int vsc8584_probe(struct phy_device *phydev) VSC8531_DUPLEX_COLLISION}; int ret; - if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) { - dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n"); - return -ENOTSUPP; - } - vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); if (!vsc8531) return -ENOMEM; @@ -2587,9 +2576,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_inband = vsc85xx_config_inband, }, { - .phy_id = PHY_ID_VSC856X, + PHY_ID_MATCH_EXACT(PHY_ID_VSC856X), .name = "Microsemi GE VSC856X SyncE", - .phy_id_mask = 0xfffffff0, /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, @@ -2625,7 +2613,7 @@ static struct phy_driver vsc85xx_driver[] = { .suspend = &genphy_suspend, .resume = &genphy_resume, .remove = &vsc85xx_remove, - .probe = &vsc8574_probe, + .probe = &vsc8584_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, @@ -2648,12 +2636,12 @@ static struct phy_driver vsc85xx_driver[] = { .config_aneg = &vsc85xx_config_aneg, .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, - .handle_interrupt = vsc85xx_handle_interrupt, + .handle_interrupt = vsc8584_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, .remove = &vsc85xx_remove, - .probe = &vsc8574_probe, + .probe = &vsc8584_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, @@ -2667,9 +2655,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_inband = vsc85xx_config_inband, }, { - .phy_id = PHY_ID_VSC8575, + PHY_ID_MATCH_EXACT(PHY_ID_VSC8575), .name = "Microsemi GE VSC8575 SyncE", - .phy_id_mask = 0xfffffff0, /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, @@ -2693,9 +2680,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_inband = vsc85xx_config_inband, }, { - .phy_id = PHY_ID_VSC8582, + PHY_ID_MATCH_EXACT(PHY_ID_VSC8582), .name = "Microsemi GE VSC8582 SyncE", - .phy_id_mask = 0xfffffff0, /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, @@ -2719,9 +2705,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_inband = vsc85xx_config_inband, }, { - .phy_id = PHY_ID_VSC8584, + PHY_ID_MATCH_EXACT(PHY_ID_VSC8584), .name = "Microsemi GE VSC8584 SyncE", - .phy_id_mask = 0xfffffff0, /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 61670be0f095..e8e5be4684ab 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -7,6 +7,7 @@ #include <linux/mdio.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/ethtool_netlink.h> #include "mdio-open-alliance.h" #include "phylib-internal.h" @@ -485,8 +486,8 @@ static int genphy_c45_baset1_read_lpa(struct phy_device *phydev) mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0); mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0); - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; return 0; } @@ -498,8 +499,8 @@ static int genphy_c45_baset1_read_lpa(struct phy_device *phydev) return val; mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val); - phydev->pause = 
val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0; - phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0; + phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP; + phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM; val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M); if (val < 0) @@ -536,8 +537,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev) phydev->lp_advertising); mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0); mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0); - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; return 0; } @@ -551,8 +552,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev) return val; mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val); - phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; - phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0; + phydev->pause = val & LPA_PAUSE_CAP; + phydev->asym_pause = val & LPA_PAUSE_ASYM; /* Read the link partner's 10G advertisement */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); @@ -1171,8 +1172,8 @@ int genphy_c45_read_status(struct phy_device *phydev) phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; if (phydev->autoneg == AUTONEG_ENABLE) { ret = genphy_c45_read_lpa(phydev); @@ -1573,3 +1574,124 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev, return ret; } EXPORT_SYMBOL(genphy_c45_ethtool_set_eee); + +/** + * oatc14_cable_test_get_result_code - Convert hardware cable test status to + * ethtool result code. + * @status: The hardware-reported cable test status + * + * This helper function maps the OATC14 HDD cable test status to the + * corresponding ethtool cable test result code. It provides a translation + * between the device-specific status values and the standardized ethtool + * result codes. + * + * Return: + * * ETHTOOL_A_CABLE_RESULT_CODE_OK - Cable is OK + * * ETHTOOL_A_CABLE_RESULT_CODE_OPEN - Open circuit detected + * * ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT - Short circuit detected + * * ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC - Status not detectable or invalid + */ +static int oatc14_cable_test_get_result_code(enum oatc14_hdd_status status) +{ + switch (status) { + case OATC14_HDD_STATUS_CABLE_OK: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case OATC14_HDD_STATUS_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case OATC14_HDD_STATUS_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case OATC14_HDD_STATUS_NOT_DETECTABLE: + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +/** + * genphy_c45_oatc14_cable_test_get_status - Get status of OATC14 10Base-T1S + * PHY cable test. + * @phydev: pointer to the PHY device structure + * @finished: pointer to a boolean set true if the test is complete + * + * Retrieves the current status of the OATC14 10Base-T1S PHY cable test. + * This function reads the OATC14 HDD register to determine whether the test + * results are valid and whether the test has finished. + * + * If the test is complete, the function reports the cable test result via + * the ethtool cable test interface using ethnl_cable_test_result(), and then + * clears the test control bit in the PHY register to reset the test state. + * + * Return: 0 on success, or a negative error code on failure (e.g. register + * read/write error). 
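+ * + * A minimal sketch of how a driver wires up these helpers (the LAN867X Rev.D0 + * entry in microchip_t1s.c below uses exactly this pairing): + * + * .cable_test_start = genphy_c45_oatc14_cable_test_start, + * .cable_test_get_status = genphy_c45_oatc14_cable_test_get_status, + * + * The phylib cable-test state machine calls the start function once and then + * polls this function until *finished is set.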
+ */ +int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret; + u8 sts; + + *finished = false; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD); + if (ret < 0) + return ret; + + if (!(ret & OATC14_HDD_VALID)) + return 0; + + *finished = true; + + sts = FIELD_GET(OATC14_HDD_SHORT_OPEN_STATUS, ret); + + ret = ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + oatc14_cable_test_get_result_code(sts)); + if (ret) + return ret; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_HDD, OATC14_HDD_CONTROL); +} +EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_get_status); + +/** + * genphy_c45_oatc14_cable_test_start - Start a cable test on an OATC14 + * 10Base-T1S PHY. + * @phydev: Pointer to the PHY device structure + * + * This function initiates a cable diagnostic test on a Clause 45 OATC14 + * 10Base-T1S capable PHY device. It first reads the PHY's advanced diagnostic + * capability register to check if High Definition Diagnostics (HDD) mode is + * supported. If the PHY does not report HDD capability, cable testing is not + * supported and the function returns -EOPNOTSUPP. + * + * For PHYs that support HDD, the function sets the appropriate control bits in + * the OATC14_HDD register to enable and start the cable diagnostic test. + * + * Return: + * * 0 on success + * * -EOPNOTSUPP if the PHY does not support HDD capability + * * A negative error code on I/O or register access failures + */ +int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_ADFCAP); + if (ret < 0) + return ret; + + if (!(ret & OATC14_ADFCAP_HDD_CAPABILITY)) + return -EOPNOTSUPP; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD, + OATC14_HDD_CONTROL); + if (ret) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD); + if (ret < 0) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD, + OATC14_HDD_START_CONTROL); +} +EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_start); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7a67c900e79a..81984d4ebb7c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -251,6 +251,16 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev) return wol.wolopts != 0; } +bool phy_may_wakeup(struct phy_device *phydev) +{ + /* If the PHY is using driver-model based wakeup, use that state. */ + if (phy_can_wakeup(phydev)) + return device_may_wakeup(&phydev->mdio.dev); + + return phy_drv_wol_enabled(phydev); +} +EXPORT_SYMBOL_GPL(phy_may_wakeup); + static void phy_link_change(struct phy_device *phydev, bool up) { struct net_device *netdev = phydev->attached_dev; @@ -302,7 +312,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) /* If the PHY on the mdio bus is not attached but has WOL enabled * we cannot suspend the PHY. */ - if (!netdev && phy_drv_wol_enabled(phydev)) + if (!netdev && phy_may_wakeup(phydev)) return false; /* PHY not attached?
May suspend if the PHY has not already been @@ -815,8 +825,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, dev->speed = SPEED_UNKNOWN; dev->duplex = DUPLEX_UNKNOWN; - dev->pause = 0; - dev->asym_pause = 0; + dev->pause = false; + dev->asym_pause = false; dev->link = 0; dev->port = PORT_TP; dev->interface = PHY_INTERFACE_MODE_GMII; @@ -1214,22 +1224,24 @@ int phy_get_c45_ids(struct phy_device *phydev) EXPORT_SYMBOL(phy_get_c45_ids); /** - * phy_find_first - finds the first PHY device on the bus + * phy_find_next - finds the next PHY device on the bus * @bus: the target MII bus + * @pos: cursor + * + * Return: next phy_device on the bus, or NULL */ -struct phy_device *phy_find_first(struct mii_bus *bus) +struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos) { - struct phy_device *phydev; - int addr; + for (int addr = pos ? pos->mdio.addr + 1 : 0; + addr < PHY_MAX_ADDR; addr++) { + struct phy_device *phydev = mdiobus_get_phy(bus, addr); - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phydev = mdiobus_get_phy(bus, addr); if (phydev) return phydev; } return NULL; } -EXPORT_SYMBOL(phy_find_first); +EXPORT_SYMBOL_GPL(phy_find_next); /** * phy_prepare_link - prepares the PHY layer to monitor link status @@ -1909,7 +1921,7 @@ int phy_suspend(struct phy_device *phydev) if (phydev->suspended || !phydrv) return 0; - phydev->wol_enabled = phy_drv_wol_enabled(phydev) || + phydev->wol_enabled = phy_may_wakeup(phydev) || (netdev && netdev->ethtool->wol_enabled); /* If the device has WOL enabled, we cannot suspend the PHY */ if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND)) @@ -2080,8 +2092,8 @@ int genphy_setup_forced(struct phy_device *phydev) { u16 ctl; - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); @@ -2488,8 +2500,8 @@ int genphy_read_status(struct phy_device *phydev) phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; if (phydev->is_gigabit_capable) { err = genphy_read_master_slave(phydev); @@ -2542,8 +2554,8 @@ int genphy_c37_read_status(struct phy_device *phydev, bool *changed) /* Signal link has changed */ *changed = true; phydev->duplex = DUPLEX_UNKNOWN; - phydev->pause = 0; - phydev->asym_pause = 0; + phydev->pause = false; + phydev->asym_pause = false; if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { lpa = phy_read(phydev, MII_LPA); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 9d7799ea1c17..6e1243bf68aa 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -93,6 +93,9 @@ struct phylink { u8 sfp_port; struct eee_config eee_cfg; + + u32 wolopts_mac; + u8 wol_sopass[SOPASS_MAX]; }; #define phylink_printk(level, pl, fmt, ...) 
\ @@ -2562,6 +2565,23 @@ void phylink_rx_clk_stop_unblock(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock); +static bool phylink_mac_supports_wol(struct phylink *pl) +{ + return !!pl->mac_ops->mac_wol_set; +} + +static bool phylink_phy_supports_wol(struct phylink *pl, + struct phy_device *phydev) +{ + return phydev && (pl->config->wol_phy_legacy || phy_can_wakeup(phydev)); +} + +static bool phylink_phy_pm_speed_ctrl(struct phylink *pl) +{ + return pl->config->wol_phy_speed_ctrl && !pl->wolopts_mac && + pl->phydev && phy_may_wakeup(pl->phydev); +} + /** * phylink_suspend() - handle a network device suspend event * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -2575,11 +2595,17 @@ EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock); * can also bring down the link between the MAC and PHY. * - If Wake-on-Lan is active, but being handled by the MAC, the MAC * still needs to receive packets, so we can not bring the link down. + * + * Note: when phylink-managed Wake-on-Lan is in use (that is, when + * struct phylink_mac_ops.mac_wol_set is populated), @mac_wol is ignored. */ void phylink_suspend(struct phylink *pl, bool mac_wol) { ASSERT_RTNL(); + if (phylink_mac_supports_wol(pl)) + mac_wol = !!pl->wolopts_mac; + if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) { /* Wake-on-Lan enabled, MAC handling */ mutex_lock(&pl->state_mutex); @@ -2605,6 +2631,9 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) } else { phylink_stop(pl); } + + if (phylink_phy_pm_speed_ctrl(pl)) + phylink_speed_down(pl, false); } EXPORT_SYMBOL_GPL(phylink_suspend); @@ -2644,6 +2673,9 @@ void phylink_resume(struct phylink *pl) { ASSERT_RTNL(); + if (phylink_phy_pm_speed_ctrl(pl)) + phylink_speed_up(pl); + if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) { /* Wake-on-Lan enabled, MAC handling */ @@ -2689,8 +2721,24 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) wol->supported = 0; wol->wolopts = 0; - if (pl->phydev) - phy_ethtool_get_wol(pl->phydev, wol); + if (phylink_mac_supports_wol(pl)) { + if (phylink_phy_supports_wol(pl, pl->phydev)) + phy_ethtool_get_wol(pl->phydev, wol); + + /* Where the MAC augments the WoL support, merge its support and + * current configuration. + */ + if (~wol->wolopts & pl->wolopts_mac & WAKE_MAGICSECURE) + memcpy(wol->sopass, pl->wol_sopass, + sizeof(wol->sopass)); + + wol->supported |= pl->config->wol_mac_support; + wol->wolopts |= pl->wolopts_mac; + } else { + /* Legacy */ + if (pl->phydev) + phy_ethtool_get_wol(pl->phydev, wol); + } } EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); @@ -2707,12 +2755,48 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); */ int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) { + struct ethtool_wolinfo w = { .cmd = ETHTOOL_GWOL }; int ret = -EOPNOTSUPP; + bool changed; + u32 wolopts; ASSERT_RTNL(); - if (pl->phydev) - ret = phy_ethtool_set_wol(pl->phydev, wol); + if (phylink_mac_supports_wol(pl)) { + wolopts = wol->wolopts; + + if (phylink_phy_supports_wol(pl, pl->phydev)) { + ret = phy_ethtool_set_wol(pl->phydev, wol); + if (ret != 0 && ret != -EOPNOTSUPP) + return ret; + + phy_ethtool_get_wol(pl->phydev, &w); + + /* Any Wake-on-Lan modes which the PHY is handling + * should not be passed on to the MAC.
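+ * For example, if WAKE_MAGIC | WAKE_PHY is requested and the PHY + * accepts only WAKE_PHY, w.wolopts reads back WAKE_PHY and just + * WAKE_MAGIC is offered to the MAC below, further masked by + * wol_mac_support.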
+ */ + wolopts &= ~w.wolopts; + } + + wolopts &= pl->config->wol_mac_support; + changed = pl->wolopts_mac != wolopts; + if (wolopts & WAKE_MAGICSECURE) + changed |= !!memcmp(wol->sopass, pl->wol_sopass, + sizeof(wol->sopass)); + memcpy(pl->wol_sopass, wol->sopass, sizeof(pl->wol_sopass)); + + if (changed) { + ret = pl->mac_ops->mac_wol_set(pl->config, wolopts, + wol->sopass); + if (!ret) + pl->wolopts_mac = wolopts; + } else { + ret = 0; + } + } else { + if (pl->phydev) + ret = phy_ethtool_set_wol(pl->phydev, wol); + } return ret; } diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs index 0b9400dcb4c1..aaaead6512a0 100644 --- a/drivers/net/phy/qt2025.rs +++ b/drivers/net/phy/qt2025.rs @@ -12,6 +12,7 @@ use kernel::c_str; use kernel::error::code; use kernel::firmware::Firmware; +use kernel::io::poll::read_poll_timeout; use kernel::net::phy::{ self, reg::{Mmd, C45}, @@ -19,6 +20,7 @@ use kernel::net::phy::{ }; use kernel::prelude::*; use kernel::sizes::{SZ_16K, SZ_8K}; +use kernel::time::Delta; kernel::module_phy_driver! { drivers: [PhyQT2025], @@ -93,7 +95,13 @@ impl Driver for PhyQT2025 { // The micro-controller will start running from SRAM. dev.write(C45::new(Mmd::PCS, 0xe854), 0x0040)?; - // TODO: sleep here until the hw becomes ready. + read_poll_timeout( + || dev.read(C45::new(Mmd::PCS, 0xd7fd)), + |val| *val != 0x00 && *val != 0x10, + Delta::from_millis(50), + Delta::from_secs(3), + )?; + Ok(()) } diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c index 16a347084293..417f9a88aab6 100644 --- a/drivers/net/phy/realtek/realtek_main.c +++ b/drivers/net/phy/realtek/realtek_main.c @@ -8,6 +8,7 @@ * Copyright (c) 2004 Freescale Semiconductor, Inc. */ #include <linux/bitops.h> +#include <linux/ethtool_netlink.h> #include <linux/of.h> #include <linux/phy.h> #include <linux/pm_wakeirq.h> @@ -127,6 +128,32 @@ */ #define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg)) +#define RTL8221B_VND2_INER 0xa4d2 +#define RTL8221B_VND2_INER_LINK_STATUS BIT(4) + +#define RTL8221B_VND2_INSR 0xa4d4 + +#define RTL8224_MII_RTCT 0x11 +#define RTL8224_MII_RTCT_ENABLE BIT(0) +#define RTL8224_MII_RTCT_PAIR_A BIT(4) +#define RTL8224_MII_RTCT_PAIR_B BIT(5) +#define RTL8224_MII_RTCT_PAIR_C BIT(6) +#define RTL8224_MII_RTCT_PAIR_D BIT(7) +#define RTL8224_MII_RTCT_DONE BIT(15) + +#define RTL8224_MII_SRAM_ADDR 0x1b +#define RTL8224_MII_SRAM_DATA 0x1c + +#define RTL8224_SRAM_RTCT_FAULT(pair) (0x8026 + (pair) * 4) +#define RTL8224_SRAM_RTCT_FAULT_BUSY BIT(0) +#define RTL8224_SRAM_RTCT_FAULT_OPEN BIT(3) +#define RTL8224_SRAM_RTCT_FAULT_SAME_SHORT BIT(4) +#define RTL8224_SRAM_RTCT_FAULT_OK BIT(5) +#define RTL8224_SRAM_RTCT_FAULT_DONE BIT(6) +#define RTL8224_SRAM_RTCT_FAULT_CROSS_SHORT BIT(7) + +#define RTL8224_SRAM_RTCT_LEN(pair) (0x8028 + (pair) * 4) + #define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE_ON BIT(12) @@ -1453,6 +1480,168 @@ static int rtl822xb_c45_read_status(struct phy_device *phydev) return 0; } +static int rtl8224_cable_test_start(struct phy_device *phydev) +{ + u32 val; + int ret; + + /* disable auto-negotiation and force 1000/Full */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(MII_BMCR), + BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED10, + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (ret) + return ret; + + mdelay(500); + + /* trigger cable test */ + val = RTL8224_MII_RTCT_ENABLE; + val |= RTL8224_MII_RTCT_PAIR_A; + val |= RTL8224_MII_RTCT_PAIR_B; + val |= RTL8224_MII_RTCT_PAIR_C; + val |= RTL8224_MII_RTCT_PAIR_D; + + return 
phy_modify_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(RTL8224_MII_RTCT), + RTL8224_MII_RTCT_DONE, val); +} + +static int rtl8224_sram_read(struct phy_device *phydev, u32 reg) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(RTL8224_MII_SRAM_ADDR), + reg); + if (ret) + return ret; + + return phy_read_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(RTL8224_MII_SRAM_DATA)); +} + +static int rtl8224_pair_len_get(struct phy_device *phydev, u32 pair) +{ + int cable_len; + u32 reg_len; + int ret; + u32 cm; + + reg_len = RTL8224_SRAM_RTCT_LEN(pair); + + /* The 16-bit raw length is spread over the high bytes of two + * consecutive SRAM words. + */ + ret = rtl8224_sram_read(phydev, reg_len); + if (ret < 0) + return ret; + + cable_len = ret & 0xff00; + + ret = rtl8224_sram_read(phydev, reg_len + 1); + if (ret < 0) + return ret; + + cable_len |= (ret & 0xff00) >> 8; + + /* Convert the raw value to centimetres: 620 is a fixed offset and + * 100/78 the scale (one raw unit is roughly 1.28 cm); both look like + * vendor calibration constants. + */ + cable_len -= 620; + cable_len = max(cable_len, 0); + + cm = cable_len * 100 / 78; + + return cm; +} + +static int rtl8224_cable_test_result_trans(u32 result) +{ + if (!(result & RTL8224_SRAM_RTCT_FAULT_DONE)) + return -EBUSY; + + if (result & RTL8224_SRAM_RTCT_FAULT_OK) + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + + if (result & RTL8224_SRAM_RTCT_FAULT_OPEN) + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + + if (result & RTL8224_SRAM_RTCT_FAULT_SAME_SHORT) + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + + if (result & RTL8224_SRAM_RTCT_FAULT_BUSY) + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + + if (result & RTL8224_SRAM_RTCT_FAULT_CROSS_SHORT) + return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT; + + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; +} + +static int rtl8224_cable_test_report_pair(struct phy_device *phydev, unsigned int pair) +{ + int fault_rslt; + int ret; + + ret = rtl8224_sram_read(phydev, RTL8224_SRAM_RTCT_FAULT(pair)); + if (ret < 0) + return ret; + + fault_rslt = rtl8224_cable_test_result_trans(ret); + if (fault_rslt < 0) + return fault_rslt; /* -EBUSY: this pair is not done yet */ + + ret = ethnl_cable_test_result(phydev, pair, fault_rslt); + if (ret < 0) + return ret; + + switch (fault_rslt) { + case ETHTOOL_A_CABLE_RESULT_CODE_OPEN: + case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT: + case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT: + ret = rtl8224_pair_len_get(phydev, pair); + if (ret < 0) + return ret; + + return ethnl_cable_test_fault_length(phydev, pair, ret); + default: + return 0; + } +} + +static int rtl8224_cable_test_report(struct phy_device *phydev, bool *finished) +{ + unsigned int pair; + int ret; + + for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) { + ret = rtl8224_cable_test_report_pair(phydev, pair); + if (ret == -EBUSY) { + *finished = false; + return 0; + } + + if (ret < 0) + return ret; + } + + return 0; +} + +static int rtl8224_cable_test_get_status(struct phy_device *phydev, bool *finished) +{ + int ret; + + *finished = false; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(RTL8224_MII_RTCT)); + if (ret < 0) + return ret; + + if (!(ret & RTL8224_MII_RTCT_DONE)) + return 0; + + *finished = true; + + return rtl8224_cable_test_report(phydev, finished); +} + static bool rtlgen_supports_2_5gbps(struct phy_device *phydev) { int val; @@ -1696,6 +1885,53 @@ static irqreturn_t rtl9000a_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } +static int rtl8221b_ack_interrupt(struct phy_device *phydev) +{ + int err; + + err = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_VND2_INSR); + + return (err < 0) ?
err : 0; +} + +static int rtl8221b_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = rtl8221b_ack_interrupt(phydev); + if (err) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_VND2_INER, + RTL8221B_VND2_INER_LINK_STATUS); + } else { + err = phy_write_mmd(phydev, MDIO_MMD_VEND2, + RTL8221B_VND2_INER, 0); + if (err) + return err; + + err = rtl8221b_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t rtl8221b_handle_interrupt(struct phy_device *phydev) +{ + int err; + + err = rtl8221b_ack_interrupt(phydev); + if (err) { + phy_error(phydev); + return IRQ_NONE; + } + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static struct phy_driver realtek_drvs[] = { { PHY_ID_MATCH_EXACT(0x00008201), @@ -1870,6 +2106,8 @@ static struct phy_driver realtek_drvs[] = { }, { .match_phy_device = rtl8221b_vb_cg_c45_match_phy_device, .name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)", + .config_intr = rtl8221b_config_intr, + .handle_interrupt = rtl8221b_handle_interrupt, .probe = rtl822x_probe, .config_init = rtl822xb_config_init, .get_rate_matching = rtl822xb_get_rate_matching, @@ -1894,6 +2132,8 @@ static struct phy_driver realtek_drvs[] = { }, { .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device, .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)", + .config_intr = rtl8221b_config_intr, + .handle_interrupt = rtl8221b_handle_interrupt, .probe = rtl822x_probe, .config_init = rtl822xb_config_init, .get_rate_matching = rtl822xb_get_rate_matching, @@ -1930,11 +2170,14 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001ccad0), .name = "RTL8224 2.5Gbps PHY", + .flags = PHY_POLL_CABLE_TEST, .get_features = rtl822x_c45_get_features, .config_aneg = rtl822x_c45_config_aneg, .read_status = rtl822x_c45_read_status, .suspend = genphy_c45_pma_suspend, .resume = rtlgen_c45_resume, + .cable_test_start = rtl8224_cable_test_start, + .cable_test_get_status = rtl8224_cable_test_get_status, }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 4ac6afce267b..4275b393a454 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -608,8 +608,8 @@ static int pppoe_release(struct socket *sock) return 0; } -static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, - int sockaddr_len, int flags) +static int pppoe_connect(struct socket *sock, struct sockaddr_unsized *uservaddr, + int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 90737cb71892..b18acd810561 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -382,8 +382,8 @@ drop: return NET_RX_DROP; } -static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, - int sockaddr_len) +static int pptp_bind(struct socket *sock, struct sockaddr_unsized *uservaddr, + int sockaddr_len) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; @@ -415,8 +415,8 @@ out: return error; } -static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, - int sockaddr_len, int flags) +static int pptp_connect(struct socket *sock, struct sockaddr_unsized *uservaddr, + int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; diff --git a/drivers/net/pse-pd/pd692x0.c 
b/drivers/net/pse-pd/pd692x0.c index f4e91ba64a66..134435e90073 100644 --- a/drivers/net/pse-pd/pd692x0.c +++ b/drivers/net/pse-pd/pd692x0.c @@ -30,6 +30,8 @@ #define PD692X0_FW_MIN_VER 5 #define PD692X0_FW_PATCH_VER 5 +#define PD692X0_USER_BYTE 42 + enum pd692x0_fw_state { PD692X0_FW_UNKNOWN, PD692X0_FW_OK, @@ -80,11 +82,17 @@ enum { PD692X0_MSG_GET_PORT_PARAM, PD692X0_MSG_GET_POWER_BANK, PD692X0_MSG_SET_POWER_BANK, + PD692X0_MSG_SET_USER_BYTE, /* add new message above here */ PD692X0_MSG_CNT }; +struct pd692x0_matrix { + u8 hw_port_a; + u8 hw_port_b; +}; + struct pd692x0_priv { struct i2c_client *client; struct pse_controller_dev pcdev; @@ -98,9 +106,12 @@ struct pd692x0_priv { bool last_cmd_key; unsigned long last_cmd_key_time; + bool cfg_saved; enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS]; struct regulator_dev *manager_reg[PD692X0_MAX_MANAGERS]; int manager_pw_budget[PD692X0_MAX_MANAGERS]; + int nmanagers; + struct pd692x0_matrix *port_matrix; }; /* Template list of communication messages. The non-null bytes defined here @@ -186,6 +197,12 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = { .key = PD692X0_KEY_CMD, .sub = {0x07, 0x0b, 0x57}, }, + [PD692X0_MSG_SET_USER_BYTE] = { + .key = PD692X0_KEY_PRG, + .sub = {0x41, PD692X0_USER_BYTE}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, }; static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo) @@ -809,11 +826,6 @@ struct pd692x0_manager { int nports; }; -struct pd692x0_matrix { - u8 hw_port_a; - u8 hw_port_b; -}; - static int pd692x0_of_get_ports_manager(struct pd692x0_priv *priv, struct pd692x0_manager *manager, @@ -903,7 +915,8 @@ pd692x0_of_get_managers(struct pd692x0_priv *priv, } of_node_put(managers_node); - return nmanagers; + priv->nmanagers = nmanagers; + return 0; out: for (i = 0; i < nmanagers; i++) { @@ -963,8 +976,7 @@ pd692x0_register_manager_regulator(struct device *dev, char *reg_name, static int pd692x0_register_managers_regulator(struct pd692x0_priv *priv, - const struct pd692x0_manager *manager, - int nmanagers) + const struct pd692x0_manager *manager) { struct device *dev = &priv->client->dev; size_t reg_name_len; @@ -975,7 +987,7 @@ pd692x0_register_managers_regulator(struct pd692x0_priv *priv, */ reg_name_len = strlen(dev_name(dev)) + 23; - for (i = 0; i < nmanagers; i++) { + for (i = 0; i < priv->nmanagers; i++) { static const char * const regulators[] = { "vaux5", "vaux3p3" }; struct regulator_dev *rdev; char *reg_name; @@ -1008,10 +1020,14 @@ pd692x0_register_managers_regulator(struct pd692x0_priv *priv, } static int -pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id, int pw) +pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id) { struct pd692x0_msg msg, buf; - int ret, pw_mW = pw / 1000; + int ret, pw_mW; + + pw_mW = priv->manager_pw_budget[id] / 1000; + if (!pw_mW) + return 0; msg = pd692x0_msg_template_list[PD692X0_MSG_GET_POWER_BANK]; msg.data[0] = id; @@ -1032,11 +1048,11 @@ pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id, int pw) } static int -pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers) +pd692x0_req_managers_pw_budget(struct pd692x0_priv *priv) { int i, ret; - for (i = 0; i < nmanagers; i++) { + for (i = 0; i < priv->nmanagers; i++) { struct regulator *supply = priv->manager_reg[i]->supply; int pw_budget; @@ -1053,7 +1069,18 @@ pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers) return ret; priv->manager_pw_budget[i] = pw_budget; - ret = 
pd692x0_conf_manager_power_budget(priv, i, pw_budget); + } + + return 0; +} + +static int +pd692x0_configure_managers(struct pd692x0_priv *priv) +{ + int i, ret; + + for (i = 0; i < priv->nmanagers; i++) { + ret = pd692x0_conf_manager_power_budget(priv, i); if (ret < 0) return ret; } @@ -1101,10 +1128,9 @@ pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset, static int pd692x0_set_ports_matrix(struct pd692x0_priv *priv, - const struct pd692x0_manager *manager, - int nmanagers, - struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]) + const struct pd692x0_manager *manager) { + struct pd692x0_matrix *port_matrix = priv->port_matrix; struct pse_controller_dev *pcdev = &priv->pcdev; int i, ret; @@ -1117,7 +1143,7 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv, /* Update with values for every PSE PIs */ for (i = 0; i < pcdev->nr_lines; i++) { ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[0], - manager, nmanagers, + manager, priv->nmanagers, &port_matrix[i]); if (ret) { dev_err(&priv->client->dev, @@ -1126,7 +1152,7 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv, } ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[1], - manager, nmanagers, + manager, priv->nmanagers, &port_matrix[i]); if (ret) { dev_err(&priv->client->dev, @@ -1139,9 +1165,9 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv, } static int -pd692x0_write_ports_matrix(struct pd692x0_priv *priv, - const struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]) +pd692x0_write_ports_matrix(struct pd692x0_priv *priv) { + struct pd692x0_matrix *port_matrix = priv->port_matrix; struct pd692x0_msg msg, buf; int ret, i; @@ -1166,13 +1192,32 @@ pd692x0_write_ports_matrix(struct pd692x0_priv *priv, return 0; } +static int pd692x0_hw_conf_init(struct pd692x0_priv *priv) +{ + int ret; + + /* Is PD692x0 ready to be configured? 
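+ * The power budgets and port matrix can only be written while the + * firmware is usable; after a firmware update the same configuration is + * re-applied from pd692x0_fw_poll_complete().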
*/ + if (priv->fw_state != PD692X0_FW_OK && + priv->fw_state != PD692X0_FW_COMPLETE) + return 0; + + ret = pd692x0_configure_managers(priv); + if (ret) + return ret; + + ret = pd692x0_write_ports_matrix(priv); + if (ret) + return ret; + + return 0; +} + static void pd692x0_of_put_managers(struct pd692x0_priv *priv, - struct pd692x0_manager *manager, - int nmanagers) + struct pd692x0_manager *manager) { int i, j; - for (i = 0; i < nmanagers; i++) { + for (i = 0; i < priv->nmanagers; i++) { for (j = 0; j < manager[i].nports; j++) of_node_put(manager[i].port_node[j]); of_node_put(manager[i].node); @@ -1198,50 +1243,71 @@ static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv) } } +static int +pd692x0_save_user_byte(struct pd692x0_priv *priv) +{ + struct pd692x0_msg msg, buf; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_USER_BYTE]; + return pd692x0_sendrecv_msg(priv, &msg, &buf); +} + static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) { - struct pd692x0_manager *manager __free(kfree) = NULL; struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); - struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]; - int ret, nmanagers; - - /* Should we flash the port matrix */ - if (priv->fw_state != PD692X0_FW_OK && - priv->fw_state != PD692X0_FW_COMPLETE) - return 0; + struct pd692x0_matrix *port_matrix; + struct pd692x0_manager *manager; + int ret; manager = kcalloc(PD692X0_MAX_MANAGERS, sizeof(*manager), GFP_KERNEL); if (!manager) return -ENOMEM; + port_matrix = devm_kcalloc(&priv->client->dev, PD692X0_MAX_PIS, + sizeof(*port_matrix), GFP_KERNEL); + if (!port_matrix) { + ret = -ENOMEM; + goto err_free_manager; + } + priv->port_matrix = port_matrix; + ret = pd692x0_of_get_managers(priv, manager); if (ret < 0) - return ret; + goto err_free_manager; - nmanagers = ret; - ret = pd692x0_register_managers_regulator(priv, manager, nmanagers); + ret = pd692x0_register_managers_regulator(priv, manager); if (ret) goto err_of_managers; - ret = pd692x0_configure_managers(priv, nmanagers); + ret = pd692x0_req_managers_pw_budget(priv); if (ret) goto err_of_managers; - ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix); + ret = pd692x0_set_ports_matrix(priv, manager); if (ret) goto err_managers_req_pw; - ret = pd692x0_write_ports_matrix(priv, port_matrix); - if (ret) - goto err_managers_req_pw; + /* Do not init the conf if it is already saved */ + if (!priv->cfg_saved) { + ret = pd692x0_hw_conf_init(priv); + if (ret) + goto err_managers_req_pw; - pd692x0_of_put_managers(priv, manager, nmanagers); + ret = pd692x0_save_user_byte(priv); + if (ret) + goto err_managers_req_pw; + } + + pd692x0_of_put_managers(priv, manager); + kfree(manager); return 0; err_managers_req_pw: pd692x0_managers_free_pw_budget(priv); err_of_managers: - pd692x0_of_put_managers(priv, manager, nmanagers); + pd692x0_of_put_managers(priv, manager); +err_free_manager: + kfree(manager); return ret; } @@ -1644,7 +1710,7 @@ static enum fw_upload_err pd692x0_fw_poll_complete(struct fw_upload *fwl) return FW_UPLOAD_ERR_FW_INVALID; } - ret = pd692x0_setup_pi_matrix(&priv->pcdev); + ret = pd692x0_hw_conf_init(priv); if (ret < 0) { dev_err(&client->dev, "Error configuring ports matrix (%pe)\n", ERR_PTR(ret)); @@ -1753,6 +1819,9 @@ static int pd692x0_i2c_probe(struct i2c_client *client) } } + if (buf.data[2] == PD692X0_USER_BYTE) + priv->cfg_saved = true; + priv->np = dev->of_node; priv->pcdev.nr_lines = PD692X0_MAX_PIS; priv->pcdev.owner = THIS_MODULE; diff --git a/drivers/net/pse-pd/tps23881.c 
b/drivers/net/pse-pd/tps23881.c index b724b222ab44..76ec1555d60d 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -55,8 +55,6 @@ #define TPS23881_REG_TPON BIT(0) #define TPS23881_REG_FWREV 0x41 #define TPS23881_REG_DEVID 0x43 -#define TPS23881_REG_DEVID_MASK 0xF0 -#define TPS23881_DEVICE_ID 0x02 #define TPS23881_REG_CHAN1_CLASS 0x4c #define TPS23881_REG_SRAM_CTRL 0x60 #define TPS23881_REG_SRAM_DATA 0x61 @@ -1012,8 +1010,28 @@ static const struct pse_controller_ops tps23881_ops = { .pi_get_pw_req = tps23881_pi_get_pw_req, }; -static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin"; -static const char fw_sram_name[] = "ti/tps23881/tps23881-sram-14.bin"; +struct tps23881_info { + u8 dev_id; /* device ID and silicon revision */ + const char *fw_parity_name; /* parity code firmware file name */ + const char *fw_sram_name; /* SRAM code firmware file name */ +}; + +enum tps23881_model { + TPS23881, + TPS23881B, +}; + +static const struct tps23881_info tps23881_info[] = { + [TPS23881] = { + .dev_id = 0x22, + .fw_parity_name = "ti/tps23881/tps23881-parity-14.bin", + .fw_sram_name = "ti/tps23881/tps23881-sram-14.bin", + }, + [TPS23881B] = { + .dev_id = 0x24, + /* skip SRAM load, ROM provides Clause 145 hardware-level support */ + }, +}; struct tps23881_fw_conf { u8 reg; @@ -1085,16 +1103,17 @@ out: return ret; } -static int tps23881_flash_sram_fw(struct i2c_client *client) +static int tps23881_flash_sram_fw(struct i2c_client *client, + const struct tps23881_info *info) { int ret; - ret = tps23881_flash_sram_fw_part(client, fw_parity_name, + ret = tps23881_flash_sram_fw_part(client, info->fw_parity_name, tps23881_fw_parity_conf); if (ret) return ret; - ret = tps23881_flash_sram_fw_part(client, fw_sram_name, + ret = tps23881_flash_sram_fw_part(client, info->fw_sram_name, tps23881_fw_sram_conf); if (ret) return ret; @@ -1412,6 +1431,7 @@ static int tps23881_setup_irq(struct tps23881_priv *priv, int irq) static int tps23881_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; + const struct tps23881_info *info; struct tps23881_priv *priv; struct gpio_desc *reset; int ret; @@ -1422,6 +1442,10 @@ static int tps23881_i2c_probe(struct i2c_client *client) return -ENXIO; } + info = i2c_get_match_data(client); + if (!info) + return -EINVAL; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -1440,7 +1464,7 @@ static int tps23881_i2c_probe(struct i2c_client *client) * to Load TPS2388x SRAM and Parity Code over I2C" (Rev E)) * indicates we should delay that programming by at least 50ms. So * we'll wait the entire 50ms here to ensure we're safe to go to the - * SRAM loading proceedure. + * SRAM loading procedure. */ msleep(50); } @@ -1449,20 +1473,27 @@ static int tps23881_i2c_probe(struct i2c_client *client) if (ret < 0) return ret; - if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) { + if (ret != info->dev_id) { dev_err(dev, "Wrong device ID\n"); return -ENXIO; } - ret = tps23881_flash_sram_fw(client); - if (ret < 0) - return ret; + if (info->fw_sram_name) { + ret = tps23881_flash_sram_fw(client, info); + if (ret < 0) + return ret; + } ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FWREV); if (ret < 0) return ret; - dev_info(&client->dev, "Firmware revision 0x%x\n", ret); + if (ret == 0xFF) { + dev_err(&client->dev, "Device entered safe mode\n"); + return -ENXIO; + } + dev_info(&client->dev, "Firmware revision 0x%x%s\n", ret, + ret == 0x00 ? 
" (ROM firmware)" : ""); /* Set configuration B, 16 bit access on a single device address */ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_GEN_MASK); @@ -1498,13 +1529,21 @@ static int tps23881_i2c_probe(struct i2c_client *client) } static const struct i2c_device_id tps23881_id[] = { - { "tps23881" }, + { "tps23881", .driver_data = (kernel_ulong_t)&tps23881_info[TPS23881] }, + { "tps23881b", .driver_data = (kernel_ulong_t)&tps23881_info[TPS23881B] }, { } }; MODULE_DEVICE_TABLE(i2c, tps23881_id); static const struct of_device_id tps23881_of_match[] = { - { .compatible = "ti,tps23881", }, + { + .compatible = "ti,tps23881", + .data = &tps23881_info[TPS23881] + }, + { + .compatible = "ti,tps23881b", + .data = &tps23881_info[TPS23881B] + }, { }, }; MODULE_DEVICE_TABLE(of, tps23881_of_match); diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 55aa8d0c8e1f..c10198d44576 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -1165,7 +1165,7 @@ int sungem_phy_probe(struct mii_phy *phy, int mii_id) int i; /* We do not reset the mii_phy structure as the driver - * may re-probe the PHY regulary + * may re-probe the PHY regularly */ phy->mii_id = mii_id; diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index 17f07eb0ee52..29dc04c299a3 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -982,63 +982,6 @@ static void team_port_disable(struct team *team, team_lower_state_changed(port); } -#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_HIGHDMA | NETIF_F_LRO | \ - NETIF_F_GSO_ENCAP_ALL) - -#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) - -static void __team_compute_features(struct team *team) -{ - struct team_port *port; - netdev_features_t vlan_features = TEAM_VLAN_FEATURES; - netdev_features_t enc_features = TEAM_ENC_FEATURES; - unsigned short max_hard_header_len = ETH_HLEN; - unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | - IFF_XMIT_DST_RELEASE_PERM; - - rcu_read_lock(); - if (list_empty(&team->port_list)) - goto done; - - vlan_features = netdev_base_features(vlan_features); - enc_features = netdev_base_features(enc_features); - - list_for_each_entry_rcu(port, &team->port_list, list) { - vlan_features = netdev_increment_features(vlan_features, - port->dev->vlan_features, - TEAM_VLAN_FEATURES); - enc_features = - netdev_increment_features(enc_features, - port->dev->hw_enc_features, - TEAM_ENC_FEATURES); - - dst_release_flag &= port->dev->priv_flags; - if (port->dev->hard_header_len > max_hard_header_len) - max_hard_header_len = port->dev->hard_header_len; - } -done: - rcu_read_unlock(); - - team->dev->vlan_features = vlan_features; - team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - team->dev->hard_header_len = max_hard_header_len; - - team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; - if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) - team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; -} - -static void team_compute_features(struct team *team) -{ - __team_compute_features(team); - netdev_change_features(team->dev); -} - static int team_port_enter(struct team *team, struct team_port *port) { int err = 0; @@ -1300,7 +1243,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, 
port); - __team_compute_features(team); + netdev_compute_master_upper_features(team->dev, true); __team_port_change_port_added(port, !!netif_oper_up(port_dev)); __team_options_change_check(team); @@ -1382,7 +1325,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) dev_set_mtu(port_dev, port->orig.mtu); kfree_rcu(port, rcu); netdev_info(dev, "Port device %s removed\n", portname); - __team_compute_features(team); + netdev_compute_master_upper_features(team->dev, true); return 0; } @@ -1970,33 +1913,19 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev, struct netlink_ext_ack *extack) { struct team *team = netdev_priv(dev); - int err; ASSERT_RTNL(); - err = team_port_add(team, port_dev, extack); - - if (!err) - netdev_change_features(dev); - - return err; + return team_port_add(team, port_dev, extack); } static int team_del_slave(struct net_device *dev, struct net_device *port_dev) { struct team *team = netdev_priv(dev); - int err; ASSERT_RTNL(); - err = team_port_del(team, port_dev); - - if (err) - return err; - - netdev_change_features(dev); - - return err; + return team_port_del(team, port_dev); } static netdev_features_t team_fix_features(struct net_device *dev, @@ -2190,7 +2119,7 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_GRO; - dev->hw_features = TEAM_VLAN_FEATURES | + dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_RX | @@ -2994,7 +2923,7 @@ static int team_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: if (!port->team->notifier_ctx) { port->team->notifier_ctx = true; - team_compute_features(port->team); + netdev_compute_master_upper_features(port->team->dev, true); port->team->notifier_ctx = false; } break; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index a22d4bb2cf3b..fa5192583860 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9311,6 +9311,7 @@ static const struct ethtool_ops ops = { .set_ringparam = rtl8152_set_ringparam, .get_pauseparam = rtl8152_get_pauseparam, .set_pauseparam = rtl8152_set_pauseparam, + .get_ts_info = ethtool_op_get_ts_info, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 697cd9d866d3..3d10cf791c51 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -189,7 +189,7 @@ static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device * is_local_ether_addr(net->dev_addr)); } -static void intr_complete (struct urb *urb) +static void intr_complete(struct urb *urb) { struct usbnet *dev = urb->context; int status = urb->status; @@ -221,7 +221,7 @@ static void intr_complete (struct urb *urb) "intr resubmit --> %d\n", status); } -static int init_status (struct usbnet *dev, struct usb_interface *intf) +static int init_status(struct usbnet *dev, struct usb_interface *intf) { char *buf = NULL; unsigned pipe = 0; @@ -326,7 +326,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. 
*/ -void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) +void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); unsigned long flags; @@ -396,7 +396,7 @@ EXPORT_SYMBOL_GPL(usbnet_update_max_qlen); * *-------------------------------------------------------------------------*/ -int usbnet_change_mtu (struct net_device *net, int new_mtu) +int usbnet_change_mtu(struct net_device *net, int new_mtu) { struct usbnet *dev = netdev_priv(net); int ll_mtu = new_mtu + net->hard_header_len; @@ -472,7 +472,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, * NOTE: annoying asymmetry: if it's active, schedule_work() fails, * but tasklet_schedule() doesn't. hope the failure is rare. */ -void usbnet_defer_kevent (struct usbnet *dev, int work) +void usbnet_defer_kevent(struct usbnet *dev, int work) { set_bit (work, &dev->flags); if (!usbnet_going_away(dev)) { @@ -489,9 +489,9 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent); /*-------------------------------------------------------------------------*/ -static void rx_complete (struct urb *urb); +static void rx_complete(struct urb *urb); -static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) +static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; @@ -597,7 +597,7 @@ static inline int rx_process(struct usbnet *dev, struct sk_buff *skb) /*-------------------------------------------------------------------------*/ -static void rx_complete (struct urb *urb) +static void rx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; @@ -728,7 +728,7 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq); // unlink pending rx/tx; completion handlers do all other cleanup -static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) +static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q) { unsigned long flags; struct sk_buff *skb; @@ -823,7 +823,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev) remove_wait_queue(&dev->wait, &wait); } -int usbnet_stop (struct net_device *net) +int usbnet_stop(struct net_device *net) { struct usbnet *dev = netdev_priv(net); const struct driver_info *info = dev->driver_info; @@ -831,6 +831,7 @@ int usbnet_stop (struct net_device *net) clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); + netdev_reset_queue(net); netif_info(dev, ifdown, dev->net, "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", @@ -892,7 +893,7 @@ EXPORT_SYMBOL_GPL(usbnet_stop); // precondition: never called in_interrupt -int usbnet_open (struct net_device *net) +int usbnet_open(struct net_device *net) { struct usbnet *dev = netdev_priv(net); int retval; @@ -939,6 +940,7 @@ int usbnet_open (struct net_device *net) } set_bit(EVENT_DEV_OPEN, &dev->flags); + netdev_reset_queue(net); netif_start_queue (net); netif_info(dev, ifup, dev->net, "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", @@ -1048,7 +1050,7 @@ int usbnet_set_link_ksettings_mii(struct net_device *net, } EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii); -u32 usbnet_get_link (struct net_device *net) +u32 usbnet_get_link(struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -1076,7 +1078,7 @@ int usbnet_nway_reset(struct net_device *net) } EXPORT_SYMBOL_GPL(usbnet_nway_reset); -void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info) +void usbnet_get_drvinfo(struct net_device 
*net, struct ethtool_drvinfo *info) { struct usbnet *dev = netdev_priv(net); @@ -1087,7 +1089,7 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info) } EXPORT_SYMBOL_GPL(usbnet_get_drvinfo); -u32 usbnet_get_msglevel (struct net_device *net) +u32 usbnet_get_msglevel(struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -1095,7 +1097,7 @@ u32 usbnet_get_msglevel (struct net_device *net) } EXPORT_SYMBOL_GPL(usbnet_get_msglevel); -void usbnet_set_msglevel (struct net_device *net, u32 level) +void usbnet_set_msglevel(struct net_device *net, u32 level) { struct usbnet *dev = netdev_priv(net); @@ -1166,7 +1168,7 @@ static void __handle_set_rx_mode(struct usbnet *dev) * especially now that control transfers can be queued. */ static void -usbnet_deferred_kevent (struct work_struct *work) +usbnet_deferred_kevent(struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); @@ -1277,7 +1279,7 @@ skip_reset: /*-------------------------------------------------------------------------*/ -static void tx_complete (struct urb *urb) +static void tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; @@ -1332,7 +1334,7 @@ static void tx_complete (struct urb *urb) /*-------------------------------------------------------------------------*/ -void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue) +void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue) { struct usbnet *dev = netdev_priv(net); @@ -1382,8 +1384,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) return 1; } -netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, - struct net_device *net) +netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); unsigned int length; @@ -1501,6 +1502,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, case 0: netif_trans_update(net); __usbnet_queue_skb(&dev->txq, skb, tx_start); + netdev_sent_queue(net, skb->len); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); } @@ -1561,9 +1563,10 @@ static inline void usb_free_skb(struct sk_buff *skb) // work (work deferred from completions, in_irq) or timer -static void usbnet_bh (struct timer_list *t) +static void usbnet_bh(struct timer_list *t) { struct usbnet *dev = timer_container_of(dev, t, delay); + unsigned int bytes_compl = 0, pkts_compl = 0; struct sk_buff *skb; struct skb_data *entry; @@ -1575,6 +1578,8 @@ static void usbnet_bh (struct timer_list *t) usb_free_skb(skb); continue; case tx_done: + bytes_compl += skb->len; + pkts_compl++; kfree(entry->urb->sg); fallthrough; case rx_cleanup: @@ -1585,6 +1590,10 @@ static void usbnet_bh (struct timer_list *t) } } + spin_lock_bh(&dev->bql_spinlock); + netdev_completed_queue(dev->net, pkts_compl, bytes_compl); + spin_unlock_bh(&dev->bql_spinlock); + /* restart RX again after disabling due to high error rate */ clear_bit(EVENT_RX_KILL, &dev->flags); @@ -1636,7 +1645,7 @@ static void usbnet_bh_work(struct work_struct *work) // precondition: never called in_interrupt -void usbnet_disconnect (struct usb_interface *intf) +void usbnet_disconnect(struct usb_interface *intf) { struct usbnet *dev; struct usb_device *xdev; @@ -1702,7 +1711,7 @@ static const struct device_type wwan_type = { }; int -usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) +usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod) { struct 
usbnet *dev; struct net_device *net; @@ -1756,6 +1765,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) skb_queue_head_init (&dev->txq); skb_queue_head_init (&dev->done); skb_queue_head_init(&dev->rxq_pause); + spin_lock_init(&dev->bql_spinlock); INIT_WORK(&dev->bh_work, usbnet_bh_work); INIT_WORK (&dev->kevent, usbnet_deferred_kevent); init_usb_anchor(&dev->deferred); @@ -1909,7 +1919,7 @@ EXPORT_SYMBOL_GPL(usbnet_probe); * resume only when the last interface is resumed */ -int usbnet_suspend (struct usb_interface *intf, pm_message_t message) +int usbnet_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -1942,7 +1952,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) } EXPORT_SYMBOL_GPL(usbnet_suspend); -int usbnet_resume (struct usb_interface *intf) +int usbnet_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct sk_buff *skb; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index a3046142cb8e..87a63c4bee77 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1323,7 +1323,7 @@ static int veth_set_channels(struct net_device *dev, if (peer) netif_carrier_off(peer); - /* try to allocate new resurces, as needed*/ + /* try to allocate new resources, as needed*/ err = veth_enable_range_safe(dev, old_rx_count, new_rx_count); if (err) goto out; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0369dda5ed60..cfa006b88688 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3776,7 +3776,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) * (2) no user configuration. * * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is, - * the device updates queue_pairs together with rss, so we can skip the sperate queue_pairs + * the device updates queue_pairs together with rss, so we can skip the separate queue_pairs * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly. */ if (vi->has_rss && !netif_is_rxfh_configured(dev)) { diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 7496a2e9a282..159295c4bd6d 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -126,14 +126,12 @@ static inline struct proto *get_proto(struct net_device *dev, u16 pid) static inline const char *proto_name(u16 pid) { switch (pid) { - case PID_LCP: - return "LCP"; case PID_IPCP: return "IPCP"; case PID_IPV6CP: return "IPV6CP"; default: - return NULL; + return "LCP"; } } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 6f78f1752cd6..7c2939cbde5f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -3,7 +3,6 @@ * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
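The usbnet hunks above wire up byte queue limits (BQL): netdev_sent_queue() accounts each skb handed to the hardware in usbnet_start_xmit(), netdev_completed_queue() reports the batch reaped in usbnet_bh() (serialized by the new dev->bql_spinlock, since completions can run concurrently), and netdev_reset_queue() zeroes the accounting on open/stop. A minimal sketch of the general pattern for a single-queue driver, assuming hypothetical foo_hw_queue()/foo_hw_next_completed() helpers:

#include <linux/netdevice.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* sample before the skb is consumed */

	if (foo_hw_queue(dev, skb))
		return NETDEV_TX_BUSY;

	netdev_sent_queue(dev, len);	/* bytes now in flight */
	return NETDEV_TX_OK;
}

static void foo_tx_reap(struct net_device *dev)
{
	unsigned int bytes = 0, pkts = 0;
	struct sk_buff *skb;

	while ((skb = foo_hw_next_completed(dev))) {
		bytes += skb->len;
		pkts++;
		dev_consume_skb_any(skb);
	}

	/* one batched update per reap pass, as usbnet_bh() now does */
	netdev_completed_queue(dev, pkts, bytes);
}

Whenever the queue is flushed outside the completion path (open/stop here), netdev_reset_queue() must be called, or the sent/completed counters drift apart and the queue stalls.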
*/ @@ -1187,7 +1186,7 @@ static int ath10k_download_fw(struct ath10k *ar) u32 address, data_len; const void *data; int ret; - struct pm_qos_request latency_qos; + struct pm_qos_request latency_qos = {}; address = ar->hw_params.patch_load_addr; @@ -1221,7 +1220,6 @@ static int ath10k_download_fw(struct ath10k *ar) ret); } - memset(&latency_qos, 0, sizeof(latency_qos)); cpu_latency_qos_add_request(&latency_qos, 0); ret = ath10k_bmi_fast_download(ar, address, data, data_len); @@ -2493,8 +2491,9 @@ static int ath10k_init_hw_params(struct ath10k *ar) return 0; } -static bool ath10k_core_needs_recovery(struct ath10k *ar) +static void ath10k_core_recovery_check_work(struct work_struct *work) { + struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work); long time_left; /* Sometimes the recovery will fail and then the next all recovery fail, @@ -2504,7 +2503,7 @@ static bool ath10k_core_needs_recovery(struct ath10k *ar) ath10k_err(ar, "consecutive fail %d times, will shutdown driver!", atomic_read(&ar->fail_cont_count)); ar->state = ATH10K_STATE_WEDGED; - return false; + return; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count); @@ -2518,27 +2517,24 @@ static bool ath10k_core_needs_recovery(struct ath10k *ar) ATH10K_RECOVERY_TIMEOUT_HZ); if (time_left) { ath10k_warn(ar, "previous recovery succeeded, skip this!\n"); - return false; + return; } /* Record the continuous recovery fail count when recovery failed. */ atomic_inc(&ar->fail_cont_count); /* Avoid having multiple recoveries at the same time. */ - return false; + return; } atomic_inc(&ar->pending_recovery); - - return true; + queue_work(ar->workqueue, &ar->restart_work); } void ath10k_core_start_recovery(struct ath10k *ar) { - if (!ath10k_core_needs_recovery(ar)) - return; - - queue_work(ar->workqueue, &ar->restart_work); + /* Use workqueue_aux to avoid blocking recovery tracking */ + queue_work(ar->workqueue_aux, &ar->recovery_check_work); } EXPORT_SYMBOL(ath10k_core_start_recovery); @@ -3356,7 +3352,7 @@ EXPORT_SYMBOL(ath10k_core_stop); */ static int ath10k_core_probe_fw(struct ath10k *ar) { - struct bmi_target_info target_info; + struct bmi_target_info target_info = {}; int ret = 0; ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); @@ -3367,7 +3363,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar) switch (ar->hif.bus) { case ATH10K_BUS_SDIO: - memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info_sdio(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); @@ -3379,7 +3374,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar) case ATH10K_BUS_PCI: case ATH10K_BUS_AHB: case ATH10K_BUS_USB: - memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); @@ -3389,7 +3383,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ar->hw->wiphy->hw_version = target_info.version; break; case ATH10K_BUS_SNOC: - memset(&target_info, 0, sizeof(target_info)); ret = ath10k_hif_get_target_info(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); @@ -3734,6 +3727,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, INIT_WORK(&ar->register_work, ath10k_core_register_work); INIT_WORK(&ar->restart_work, ath10k_core_restart); + INIT_WORK(&ar->recovery_check_work, ath10k_core_recovery_check_work); INIT_WORK(&ar->set_coverage_class_work, ath10k_core_set_coverage_class_work); diff 
--git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 8c72ed386edb..73a9db302245 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -3,7 +3,6 @@ * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ @@ -1208,6 +1207,7 @@ struct ath10k { struct work_struct register_work; struct work_struct restart_work; + struct work_struct recovery_check_work; struct work_struct bundle_tx_work; struct work_struct tx_complete_work; @@ -1259,9 +1259,13 @@ struct ath10k { struct { /* protected by conf_mutex */ struct ath10k_fw_components utf_mode_fw; + u8 ftm_msgref; /* protected by data_lock */ bool utf_monitor; + u32 data_pos; + u32 expected_seq; + u8 *eventdata; } testmode; struct { diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 154ac7a70982..da6f7957a0ae 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3,7 +3,6 @@ * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ @@ -5428,6 +5427,7 @@ static void ath10k_stop(struct ieee80211_hw *hw, bool suspend) cancel_work_sync(&ar->set_coverage_class_work); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->restart_work); + cancel_work_sync(&ar->recovery_check_work); } static int ath10k_config_ps(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index f1f33af0170a..8275345631a0 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -986,7 +986,7 @@ static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl, ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n"); - ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq, + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)&qmi->sq, sizeof(qmi->sq), 0); if (ret) { ath10k_err(ar, "failed to connect to a remote QMI service port\n"); diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 3fcefc55b74f..d3bd385694d6 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: ISC /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
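The ath10k hunks above turn the old synchronous ath10k_core_needs_recovery() check into recovery_check_work: ath10k_core_start_recovery() now only queues that work on workqueue_aux (so the failure-reporting path never blocks on recovery bookkeeping), the work item itself decides whether to queue restart_work, and ath10k_stop() gains the matching cancel_work_sync(). A minimal sketch of this defer-then-decide pattern, with hypothetical foo_* names:

#include <linux/workqueue.h>

struct foo_dev {
	struct workqueue_struct *wq, *aux_wq;
	struct work_struct recovery_check_work;
	struct work_struct restart_work;
};

bool foo_should_recover(struct foo_dev *fd);	/* failure-count policy */

static void foo_recovery_check_work(struct work_struct *work)
{
	struct foo_dev *fd = container_of(work, struct foo_dev,
					  recovery_check_work);

	if (!foo_should_recover(fd))
		return;

	queue_work(fd->wq, &fd->restart_work);
}

static void foo_start_recovery(struct foo_dev *fd)
{
	/* cheap and safe from the error path: just queue the check */
	queue_work(fd->aux_wq, &fd->recovery_check_work);
}

On teardown the check work must be cancelled with cancel_work_sync() before the restart machinery it may queue into goes away, which is exactly what the ath10k_stop() hunk adds.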
*/ #include "testmode.h" @@ -10,12 +11,17 @@ #include "debug.h" #include "wmi.h" +#include "wmi-tlv.h" #include "hif.h" #include "hw.h" #include "core.h" #include "testmode_i.h" +#define ATH10K_FTM_SEG_NONE ((u32)-1) +#define ATH10K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0) +#define ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4) + static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = { [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 }, [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY, @@ -25,41 +31,19 @@ static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = { [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, }; -/* Returns true if callee consumes the skb and the skb should be discarded. - * Returns false if skb is not used. Does not sleep. - */ -bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) +static void ath10k_tm_event_unsegmented(struct ath10k *ar, u32 cmd_id, + struct sk_buff *skb) { struct sk_buff *nl_skb; - bool consumed; int ret; - ath10k_dbg(ar, ATH10K_DBG_TESTMODE, - "testmode event wmi cmd_id %d skb %p skb->len %d\n", - cmd_id, skb, skb->len); - - ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len); - - spin_lock_bh(&ar->data_lock); - - if (!ar->testmode.utf_monitor) { - consumed = false; - goto out; - } - - /* Only testmode.c should be handling events from utf firmware, - * otherwise all sort of problems will arise as mac80211 operations - * are not initialised. - */ - consumed = true; - nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, 2 * sizeof(u32) + skb->len, GFP_ATOMIC); if (!nl_skb) { ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n"); - goto out; + return; } ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI); @@ -68,7 +52,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) "failed to put testmode wmi event cmd attribute: %d\n", ret); kfree_skb(nl_skb); - goto out; + return; } ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id); @@ -77,7 +61,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) "failed to put testmode wmi event cmd_id: %d\n", ret); kfree_skb(nl_skb); - goto out; + return; } ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data); @@ -86,10 +70,122 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) "failed to copy skb to testmode wmi event: %d\n", ret); kfree_skb(nl_skb); - goto out; + return; + } + + cfg80211_testmode_event(nl_skb, GFP_ATOMIC); +} + +static void ath10k_tm_event_segmented(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) +{ + struct wmi_ftm_cmd *ftm = (struct wmi_ftm_cmd *)skb->data; + u8 total_segments, current_seq; + struct sk_buff *nl_skb; + u8 const *buf_pos; + u16 datalen; + u32 data_pos; + int ret; + + if (skb->len < sizeof(*ftm)) { + ath10k_warn(ar, "Invalid ftm event length: %d\n", skb->len); + return; + } + + current_seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ, + __le32_to_cpu(ftm->seg_hdr.segmentinfo)); + total_segments = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, + __le32_to_cpu(ftm->seg_hdr.segmentinfo)); + datalen = skb->len - sizeof(*ftm); + buf_pos = ftm->data; + + if (current_seq == 0) { + ar->testmode.expected_seq = 0; + ar->testmode.data_pos = 0; + } + + data_pos = ar->testmode.data_pos; + + if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) { + ath10k_warn(ar, "Invalid ftm event length at %u: %u\n", + data_pos, datalen); + ret = -EINVAL; + return; + } + + memcpy(&ar->testmode.eventdata[data_pos], 
buf_pos, datalen); + data_pos += datalen; + + if (++ar->testmode.expected_seq != total_segments) { + ar->testmode.data_pos = data_pos; + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "partial data received %u/%u\n", + current_seq + 1, total_segments); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "total data length %u\n", data_pos); + + nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, + 2 * sizeof(u32) + data_pos, + GFP_ATOMIC); + if (!nl_skb) { + ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n"); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_TLV); + if (ret) { + ath10k_warn(ar, "failed to put testmode wmi event attribute: %d\n", ret); + kfree_skb(nl_skb); + return; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id); + if (ret) { + ath10k_warn(ar, "failed to put testmode wmi event cmd_id: %d\n", ret); + kfree_skb(nl_skb); + return; + } + + ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, data_pos, &ar->testmode.eventdata[0]); + if (ret) { + ath10k_warn(ar, "failed to copy skb to testmode wmi event: %d\n", ret); + kfree_skb(nl_skb); + return; } cfg80211_testmode_event(nl_skb, GFP_ATOMIC); +} + +/* Returns true if callee consumes the skb and the skb should be discarded. + * Returns false if skb is not used. Does not sleep. + */ +bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) +{ + bool consumed; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "testmode event wmi cmd_id %d skb %p skb->len %d\n", + cmd_id, skb, skb->len); + + ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len); + + spin_lock_bh(&ar->data_lock); + + if (!ar->testmode.utf_monitor) { + consumed = false; + goto out; + } + + /* Only testmode.c should be handling events from utf firmware, + * otherwise all sort of problems will arise as mac80211 operations + * are not initialised. 
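The new ath10k_tm_event_segmented() above reassembles multi-segment FTM events: seg_hdr.segmentinfo packs the current sequence number in bits 3:0 and the total segment count in bits 7:4, payload chunks are appended into testmode.eventdata (bounded by ATH_FTM_EVENT_MAX_BUF_LENGTH), and the assembled buffer is pushed to userspace once expected_seq reaches the total. A standalone sketch of the header decode under the same bit layout; the field arrives little-endian on the wire, hence the __le32_to_cpu() in the driver:

#include <linux/bitfield.h>

#define FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
#define FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)

static void ftm_decode_seginfo(u32 segmentinfo, u8 *cur, u8 *total)
{
	*cur = FIELD_GET(FTM_SEGHDR_CURRENT_SEQ, segmentinfo);
	*total = FIELD_GET(FTM_SEGHDR_TOTAL_SEGMENTS, segmentinfo);
}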
+ */ + consumed = true; + + if (ar->testmode.expected_seq != ATH10K_FTM_SEG_NONE) + ath10k_tm_event_segmented(ar, cmd_id, skb); + else + ath10k_tm_event_unsegmented(ar, cmd_id, skb); out: spin_unlock_bh(&ar->data_lock); @@ -281,12 +377,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) goto err_release_utf_mode_fw; } + ar->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL); + if (!ar->testmode.eventdata) { + ret = -ENOMEM; + goto err_power_down; + } + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF, &ar->testmode.utf_mode_fw); if (ret) { ath10k_err(ar, "failed to start core (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; - goto err_power_down; + goto err_release_eventdata; } ar->state = ATH10K_STATE_UTF; @@ -302,6 +404,10 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) return 0; +err_release_eventdata: + kfree(ar->testmode.eventdata); + ar->testmode.eventdata = NULL; + err_power_down: ath10k_hif_power_down(ar); @@ -341,6 +447,9 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware); ar->testmode.utf_mode_fw.fw_file.firmware = NULL; + kfree(ar->testmode.eventdata); + ar->testmode.eventdata = NULL; + ar->state = ATH10K_STATE_OFF; } @@ -424,6 +533,85 @@ out: return ret; } +static int ath10k_tm_cmd_tlv(struct ath10k *ar, struct nlattr *tb[]) +{ + u16 total_bytes, num_segments; + u32 cmd_id, buf_len; + u8 segnumber = 0; + u8 *bufpos; + void *buf; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); + cmd_id = WMI_PDEV_UTF_CMDID; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "cmd wmi ftm cmd_id %d buffer length %d\n", + cmd_id, buf_len); + ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); + + bufpos = buf; + total_bytes = buf_len; + num_segments = total_bytes / MAX_WMI_UTF_LEN; + ar->testmode.expected_seq = 0; + + if (buf_len - (num_segments * MAX_WMI_UTF_LEN)) + num_segments++; + + while (buf_len) { + u16 chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN); + struct wmi_ftm_cmd *ftm_cmd; + struct sk_buff *skb; + u32 hdr_info; + u8 seginfo; + + skb = ath10k_wmi_alloc_skb(ar, (chunk_len + + sizeof(struct wmi_ftm_cmd))); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ftm_cmd = (struct wmi_ftm_cmd *)skb->data; + hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, (chunk_len + + sizeof(struct wmi_ftm_seg_hdr))); + ftm_cmd->tlv_header = __cpu_to_le32(hdr_info); + ftm_cmd->seg_hdr.len = __cpu_to_le32(total_bytes); + ftm_cmd->seg_hdr.msgref = __cpu_to_le32(ar->testmode.ftm_msgref); + seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) | + FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, segnumber); + ftm_cmd->seg_hdr.segmentinfo = __cpu_to_le32(seginfo); + segnumber++; + + memcpy(&ftm_cmd->data, bufpos, chunk_len); + + ret = ath10k_wmi_cmd_send(ar, skb, cmd_id); + if (ret) { + ath10k_warn(ar, "failed to send wmi ftm command: %d\n", ret); + goto out; + } + + buf_len -= chunk_len; + bufpos += chunk_len; + } + + ar->testmode.ftm_msgref++; + ret = 0; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { @@ -439,9 +627,14 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (!tb[ATH10K_TM_ATTR_CMD]) return -EINVAL; + 
ar->testmode.expected_seq = ATH10K_FTM_SEG_NONE; + switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) { case ATH10K_TM_CMD_GET_VERSION: - return ath10k_tm_cmd_get_version(ar, tb); + if (!tb[ATH10K_TM_ATTR_DATA]) + return ath10k_tm_cmd_get_version(ar, tb); + else /* ATH10K_TM_CMD_TLV */ + return ath10k_tm_cmd_tlv(ar, tb); case ATH10K_TM_CMD_UTF_START: return ath10k_tm_cmd_utf_start(ar, tb); case ATH10K_TM_CMD_UTF_STOP: diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h index ee1cb27c1d60..1603f5276682 100644 --- a/drivers/net/wireless/ath/ath10k/testmode_i.h +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ /* "API" level of the ath10k testmode interface. Bump it after every @@ -14,6 +15,7 @@ #define ATH10K_TESTMODE_VERSION_MINOR 0 #define ATH10K_TM_DATA_MAX_LEN 5000 +#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048 enum ath10k_tm_attr { __ATH10K_TM_ATTR_INVALID = 0, @@ -57,4 +59,17 @@ enum ath10k_tm_cmd { * ATH10K_TM_ATTR_DATA. */ ATH10K_TM_CMD_WMI = 3, + + /* The command used to transmit a test command to the firmware + * and the event to receive test events from the firmware. The data + * received only contain the TLV payload, need to add the tlv header + * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID. + * The data payload size could be large and the driver needs to + * send segmented data to firmware. + * + * This legacy testmode command shares the same value as the get-version + * command. To distinguish between them, we check whether the data attribute + * is present. + */ + ATH10K_TM_CMD_TLV = ATH10K_TM_CMD_GET_VERSION, }; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 0faefc0a9a40..7f50a1de6b97 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3,7 +3,7 @@ * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef _WMI_H_ @@ -7418,6 +7418,23 @@ struct wmi_pdev_bb_timing_cfg_cmd { __le32 bb_xpa_timing; } __packed; +struct wmi_ftm_seg_hdr { + __le32 len; + __le32 msgref; + __le32 segmentinfo; + __le32 pdev_id; +} __packed; + +struct wmi_ftm_cmd { + __le32 tlv_header; + struct wmi_ftm_seg_hdr seg_hdr; + u8 data[]; +} __packed; + +#define WMI_TLV_LEN GENMASK(15, 0) +#define WMI_TLV_TAG GENMASK(31, 16) +#define MAX_WMI_UTF_LEN 252 + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 839095af9267..82603a389bb9 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
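On the command path, ath10k_tm_cmd_tlv() above splits the user buffer into MAX_WMI_UTF_LEN (252-byte) chunks - the open-coded segment count amounts to DIV_ROUND_UP(total_bytes, MAX_WMI_UTF_LEN) - and prefixes each chunk with a TLV header plus the new wmi_ftm_seg_hdr. A condensed sketch of packing those headers with the definitions added above:

#include <linux/bitfield.h>

static __le32 ftm_pack_tlv_hdr(u16 chunk_len)
{
	u32 v = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) |
		FIELD_PREP(WMI_TLV_LEN,
			   chunk_len + sizeof(struct wmi_ftm_seg_hdr));

	return __cpu_to_le32(v);
}

static __le32 ftm_pack_seginfo(u8 seg, u8 nsegs)	/* both fit in 4 bits */
{
	u32 v = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, nsegs) |
		FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, seg);

	return __cpu_to_le32(v);
}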
*/ #ifndef ATH11K_HAL_H @@ -43,14 +43,14 @@ struct ath11k_base; #define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 -#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \ - (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg) -#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \ - (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg) -#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \ - (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg) -#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \ - (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg) +#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \ + ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \ + ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \ + ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg) +#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \ + ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg) #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 #define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 @@ -209,10 +209,10 @@ struct ath11k_base; #define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp /* WBM Idle R0 address */ -#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(x) \ - (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb) -#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(x) \ - (ab->hw_params.regs->hal_wbm_idle_link_ring_misc) +#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \ + ((ab)->hw_params.regs->hal_wbm_idle_link_ring_base_lsb) +#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \ + ((ab)->hw_params.regs->hal_wbm_idle_link_ring_misc) #define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048 #define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c #define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058 @@ -227,17 +227,17 @@ struct ath11k_base; #define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0 /* SW2WBM R0 release address */ -#define HAL_WBM_RELEASE_RING_BASE_LSB(x) \ - (ab->hw_params.regs->hal_wbm_release_ring_base_lsb) +#define HAL_WBM_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params.regs->hal_wbm_release_ring_base_lsb) /* SW2WBM R2 release address */ #define HAL_WBM_RELEASE_RING_HP 0x00003018 /* WBM2SW R0 release address */ -#define HAL_WBM0_RELEASE_RING_BASE_LSB(x) \ - (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb) -#define HAL_WBM1_RELEASE_RING_BASE_LSB(x) \ - (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb) +#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params.regs->hal_wbm0_release_ring_base_lsb) +#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \ + ((ab)->hw_params.regs->hal_wbm1_release_ring_base_lsb) /* WBM2SW R2 release address */ #define HAL_WBM0_RELEASE_RING_HP 0x000030c0 diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 0e41b5a91d66..3276fe443502 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2235,9 +2235,9 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, arg->peer_nss = min(sta->deflink.rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + arg->rx_mcs_set = ath11k_peer_assoc_h_vht_limit(arg->rx_mcs_set, vht_mcs_mask); arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); - arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit( - __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); + arg->tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); /* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default. 
* VHT mcs rate 10 and 11 is not supported in 11ac standard. @@ -2522,10 +2522,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, he_tx_mcs = v; } v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); - v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; arg->peer_he_mcs_count++; @@ -2535,10 +2535,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, default: v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); - v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; arg->peer_he_mcs_count++; @@ -4028,6 +4028,150 @@ static int ath11k_start_scan(struct ath11k *ar, return 0; } +static void ath11k_mac_fw_stats_reset(struct ath11k *ar) +{ + spin_lock_bh(&ar->data_lock); + ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); + ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); + ar->fw_stats.num_vdev_recvd = 0; + ar->fw_stats.num_bcn_recvd = 0; + spin_unlock_bh(&ar->data_lock); +} + +int ath11k_mac_fw_stats_request(struct ath11k *ar, + struct stats_request_params *req_param) +{ + struct ath11k_base *ab = ar->ab; + unsigned long time_left; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_mac_fw_stats_reset(ar); + + reinit_completion(&ar->fw_stats_complete); + reinit_completion(&ar->fw_stats_done); + + ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); + + if (ret) { + ath11k_warn(ab, "could not request fw stats (%d)\n", + ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); + if (!time_left) + return -ETIMEDOUT; + + /* FW stats can get split when exceeding the stats data buffer limit. + * In that case, since there is no end marking for the back-to-back + * received 'update stats' event, we keep a 3 seconds timeout in case, + * fw_stats_done is not marked yet + */ + time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); + if (!time_left) + return -ETIMEDOUT; + + return 0; +} + +static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id, + u32 vdev_id, u32 stats_id) +{ + struct ath11k_base *ab = ar->ab; + struct stats_request_params req_param; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->state != ATH11K_STATE_ON) + return -ENETDOWN; + + req_param.pdev_id = pdev_id; + req_param.vdev_id = vdev_id; + req_param.stats_id = stats_id; + + ret = ath11k_mac_fw_stats_request(ar, &req_param); + if (ret) + ath11k_warn(ab, "failed to request fw stats: %d\n", ret); + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", + pdev_id, vdev_id, stats_id); + + return ret; +} + +static int ath11k_mac_handle_get_txpower(struct ath11k *ar, + struct ieee80211_vif *vif, + int *dbm) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_fw_stats_pdev *pdev; + int ret; + + /* Final Tx power is minimum of Target Power, CTL power, Regulatory + * Power, PSD EIRP Power. We just know the Regulatory power from the + * regulatory rules obtained. FW knows all these power and sets the min + * of these. Hence, we request the FW pdev stats in which FW reports + * the minimum of all vdev's channel Tx power. 
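ath11k_mac_fw_stats_request() is moved up unchanged so the txpower path can reuse it; it waits on two completions - fw_stats_complete for the first update-stats event (1 s), then fw_stats_done with a 3 s budget, because oversized stats are split across back-to-back events with no end marker. The bare pattern, with a hypothetical foo_dev and foo_send_stats_cmd():

#include <linux/completion.h>

struct foo_dev {
	struct completion stats_first, stats_done;
};

int foo_send_stats_cmd(struct foo_dev *fd);	/* hypothetical fw command */

static int foo_request_stats(struct foo_dev *fd)
{
	int ret;

	reinit_completion(&fd->stats_first);
	reinit_completion(&fd->stats_done);

	ret = foo_send_stats_cmd(fd);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fd->stats_first, 1 * HZ))
		return -ETIMEDOUT;

	/* replies may be split; allow extra time for the final fragment */
	if (!wait_for_completion_timeout(&fd->stats_done, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}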
+ */ + lockdep_assert_held(&ar->conf_mutex); + + /* Firmware doesn't provide Tx power during CAC hence no need to fetch + * the stats. + */ + if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) + return -EAGAIN; + + ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_PDEV_STAT); + if (ret) { + ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); + goto err_fallback; + } + + spin_lock_bh(&ar->data_lock); + pdev = list_first_entry_or_null(&ar->fw_stats.pdevs, + struct ath11k_fw_stats_pdev, list); + if (!pdev) { + spin_unlock_bh(&ar->data_lock); + goto err_fallback; + } + + /* tx power is set as 2 units per dBm in FW. */ + *dbm = pdev->chan_tx_power / 2; + + spin_unlock_bh(&ar->data_lock); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n", + pdev->chan_tx_power, *dbm); + return 0; + +err_fallback: + /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */ + *dbm = vif->bss_conf.txpower; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n", + *dbm); + return 0; +} + +static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, + int *dbm) +{ + struct ath11k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + ret = ath11k_mac_handle_get_txpower(ar, vif, dbm); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) @@ -6107,6 +6251,159 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) ath11k_mgmt_over_wmi_tx_drop(ar, skb); } +static int ath11k_mac_mgmt_action_frame_fill_elem_data(struct ath11k_vif *arvif, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u8 category, *buf, iv_len, action_code, dialog_token; + int cur_tx_power, max_tx_power; + struct ath11k *ar = arvif->ar; + struct cfg80211_chan_def def; + struct ath11k_skb_cb *skb_cb; + struct ieee80211_mgmt *mgmt; + unsigned int remaining_len; + bool has_protected; + + lockdep_assert_held(&ar->conf_mutex); + + /* make sure category field is present */ + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return -EINVAL; + + remaining_len = skb->len - IEEE80211_MIN_ACTION_SIZE; + has_protected = ieee80211_has_protected(hdr->frame_control); + + /* In case of SW crypto and hdr protected (PMF), packet will already be encrypted, + * we can't put in data in this case + */ + if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) && + has_protected) + return 0; + + mgmt = (struct ieee80211_mgmt *)hdr; + buf = (u8 *)&mgmt->u.action; + + /* FCTL_PROTECTED frame might have extra space added for HDR_LEN. Offset that + * many bytes if it is there + */ + if (has_protected) { + skb_cb = ATH11K_SKB_CB(skb); + + switch (skb_cb->cipher) { + /* Cipher suite having flag %IEEE80211_KEY_FLAG_GENERATE_IV_MGMT set in + * key needs to be processed. 
See ath11k_install_key() + */ + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + iv_len = IEEE80211_CCMP_HDR_LEN; + break; + case WLAN_CIPHER_SUITE_TKIP: + iv_len = 0; + break; + default: + return -EINVAL; + } + + if (remaining_len < iv_len) + return -EINVAL; + + buf += iv_len; + remaining_len -= iv_len; + } + + category = *buf++; + /* category code is already taken care in %IEEE80211_MIN_ACTION_SIZE hence + * no need to adjust remaining_len + */ + + switch (category) { + case WLAN_CATEGORY_RADIO_MEASUREMENT: + /* need action code and dialog token */ + if (remaining_len < 2) + return -EINVAL; + + /* Packet Format: + * Action Code | Dialog Token | Variable Len (based on Action Code) + */ + action_code = *buf++; + dialog_token = *buf++; + remaining_len -= 2; + + if (ath11k_mac_vif_chan(arvif->vif, &def)) + return -ENOENT; + + cur_tx_power = arvif->vif->bss_conf.txpower; + max_tx_power = min(def.chan->max_reg_power, (int)ar->max_tx_power / 2); + ath11k_mac_handle_get_txpower(ar, arvif->vif, &cur_tx_power); + + switch (action_code) { + case WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST: + /* need variable fields to be present in len */ + if (remaining_len < 2) + return -EINVAL; + + /* Variable length format as defined in IEEE 802.11-2024, + * Figure 9-1187-Link Measurement Request frame Action field + * format. + * Transmit Power | Max Tx Power + * We fill both of these. + */ + *buf++ = cur_tx_power; + *buf = max_tx_power; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "RRM: Link Measurement Req dialog_token %u cur_tx_power %d max_tx_power %d\n", + dialog_token, cur_tx_power, max_tx_power); + break; + case WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT: + /* need variable fields to be present in len */ + if (remaining_len < 3) + return -EINVAL; + + /* Variable length format as defined in IEEE 802.11-2024, + * Figure 9-1188-Link Measurement Report frame Action field format + * TPC Report | Variable Fields + * + * TPC Report Format: + * Element ID | Len | Tx Power | Link Margin + * + * We fill Tx power in the TPC Report (2nd index) + */ + buf[2] = cur_tx_power; + + /* TODO: At present, Link margin data is not present so can't + * really fill it now. Once it is available, it can be added + * here + */ + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "RRM: Link Measurement Report dialog_token %u cur_tx_power %d\n", + dialog_token, cur_tx_power); + break; + default: + return -EINVAL; + } + break; + default: + /* nothing to fill */ + return 0; + } + + return 0; +} + +static int ath11k_mac_mgmt_frame_fill_elem_data(struct ath11k_vif *arvif, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_action(hdr->frame_control)) + return 0; + + return ath11k_mac_mgmt_action_frame_fill_elem_data(arvif, skb); +} + static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); @@ -6126,6 +6423,19 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) arvif = ath11k_vif_to_arvif(skb_cb->vif); mutex_lock(&ar->conf_mutex); if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { + /* Fill in the data which is required to be filled by the driver + * For example: Max Tx power in Link Measurement Request/Report + */ + ret = ath11k_mac_mgmt_frame_fill_elem_data(arvif, skb); + if (ret) { + /* If we couldn't fill the data due to any reason, + * let's not discard transmitting the packet. 
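ath11k_mac_mgmt_action_frame_fill_elem_data() above patches driver-known TX power values into outgoing RRM Link Measurement Request/Report action frames just before WMI transmit, bounds-checking every field and skipping the cipher IV when the frame header is protected (software-encrypted PMF frames are left untouched, as they are already ciphertext). A compact sketch of the IV-skip step under the same cipher assumptions:

#include <linux/ieee80211.h>

/* IV bytes reserved in front of the action body for ciphers where
 * mac80211 asks the driver to generate the mgmt IV later
 */
static int foo_mgmt_iv_len(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		return IEEE80211_CCMP_HDR_LEN;	/* 8 bytes, shared layout */
	case WLAN_CIPHER_SUITE_TKIP:
		return 0;
	default:
		return -EINVAL;
	}
}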
+ */ + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "Failed to fill the required data for the mgmt packet err %d\n", + ret); + } + ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", @@ -9079,81 +9389,6 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo, } } -static void ath11k_mac_fw_stats_reset(struct ath11k *ar) -{ - spin_lock_bh(&ar->data_lock); - ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); - ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); - ar->fw_stats.num_vdev_recvd = 0; - ar->fw_stats.num_bcn_recvd = 0; - spin_unlock_bh(&ar->data_lock); -} - -int ath11k_mac_fw_stats_request(struct ath11k *ar, - struct stats_request_params *req_param) -{ - struct ath11k_base *ab = ar->ab; - unsigned long time_left; - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - ath11k_mac_fw_stats_reset(ar); - - reinit_completion(&ar->fw_stats_complete); - reinit_completion(&ar->fw_stats_done); - - ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); - - if (ret) { - ath11k_warn(ab, "could not request fw stats (%d)\n", - ret); - return ret; - } - - time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); - if (!time_left) - return -ETIMEDOUT; - - /* FW stats can get split when exceeding the stats data buffer limit. - * In that case, since there is no end marking for the back-to-back - * received 'update stats' event, we keep a 3 seconds timeout in case, - * fw_stats_done is not marked yet - */ - time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); - if (!time_left) - return -ETIMEDOUT; - - return 0; -} - -static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id, - u32 vdev_id, u32 stats_id) -{ - struct ath11k_base *ab = ar->ab; - struct stats_request_params req_param; - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - if (ar->state != ATH11K_STATE_ON) - return -ENETDOWN; - - req_param.pdev_id = pdev_id; - req_param.vdev_id = vdev_id; - req_param.stats_id = stats_id; - - ret = ath11k_mac_fw_stats_request(ar, &req_param); - if (ret) - ath11k_warn(ab, "failed to request fw stats: %d\n", ret); - - ath11k_dbg(ab, ATH11K_DBG_WMI, - "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", - pdev_id, vdev_id, stats_id); - - return ret; -} - static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -9539,66 +9774,6 @@ exit: return ret; } -static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - unsigned int link_id, - int *dbm) -{ - struct ath11k *ar = hw->priv; - struct ath11k_base *ab = ar->ab; - struct ath11k_fw_stats_pdev *pdev; - int ret; - - /* Final Tx power is minimum of Target Power, CTL power, Regulatory - * Power, PSD EIRP Power. We just know the Regulatory power from the - * regulatory rules obtained. FW knows all these power and sets the min - * of these. Hence, we request the FW pdev stats in which FW reports - * the minimum of all vdev's channel Tx power. - */ - mutex_lock(&ar->conf_mutex); - - /* Firmware doesn't provide Tx power during CAC hence no need to fetch - * the stats. 
- */ - if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { - mutex_unlock(&ar->conf_mutex); - return -EAGAIN; - } - - ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, - WMI_REQUEST_PDEV_STAT); - if (ret) { - ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); - goto err_fallback; - } - - spin_lock_bh(&ar->data_lock); - pdev = list_first_entry_or_null(&ar->fw_stats.pdevs, - struct ath11k_fw_stats_pdev, list); - if (!pdev) { - spin_unlock_bh(&ar->data_lock); - goto err_fallback; - } - - /* tx power is set as 2 units per dBm in FW. */ - *dbm = pdev->chan_tx_power / 2; - - spin_unlock_bh(&ar->data_lock); - mutex_unlock(&ar->conf_mutex); - - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n", - pdev->chan_tx_power, *dbm); - return 0; - -err_fallback: - mutex_unlock(&ar->conf_mutex); - /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */ - *dbm = vif->bss_conf.txpower; - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n", - *dbm); - return 0; -} - static int ath11k_mac_station_add(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -10368,6 +10543,8 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; + ar->hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; + ar->max_num_stations = TARGET_NUM_STATIONS(ab); ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d8655badd96d..7114eca8810d 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/module.h> @@ -177,6 +177,19 @@ static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); } +static void ath11k_pci_restore_window(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + spin_lock_bh(&ab_pci->window_lock); + + iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | ab_pci->register_window, + ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + + spin_unlock_bh(&ab_pci->window_lock); +} + static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) { u32 val, delay; @@ -201,6 +214,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET); if (val == 0xffffffff) ath11k_warn(ab, "link down error during global reset\n"); + + /* Restore window register as its content is cleared during + * hardware global reset, such that it aligns with host cache. + */ + ath11k_pci_restore_window(ab); } static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index c33c7865145c..1e3005a4b64c 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef _ATH11K_PCI_H #define _ATH11K_PCI_H @@ -35,18 +35,18 @@ #define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 #define PCIE_INT_CLEAR_ALL 0xffffffff -#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \ - (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel) +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \ + ((ab)->hw_params.regs->pcie_qserdes_sysclk_en_sel) #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 #define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff -#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \ - (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base) +#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \ + ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base) #define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02 -#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \ - (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4) +#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \ + ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4) #define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52 -#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \ - (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc) +#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \ + ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc) #define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff #define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index aea56c38bf8f..ff6a97e328b8 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -3177,7 +3177,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, sq->sq_node = service->node; sq->sq_port = service->port; - ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)sq, sizeof(*sq), 0); if (ret) { ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index e3b444333dee..110035dae8a6 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
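The ath11k hal.h and pci.h register macros above fix a classic macro hazard: the old forms took a parameter x, ignored it, and silently captured whatever variable named ab happened to be in the caller's scope; the new forms actually use the parameter and parenthesize it so any expression works. A minimal illustration:

struct hw_regs { unsigned int ce0_src; };
struct ab_dev { const struct hw_regs *regs; };

/* broken: ignores 'x'; only compiles where a variable 'ab' exists */
#define CE0_SRC_REG_BAD(x)	(ab->regs->ce0_src)

/* fixed: uses and parenthesizes the argument */
#define CE0_SRC_REG(ab)		((ab)->regs->ce0_src)

unsigned int read_ce0(struct ab_dev *dev)
{
	return CE0_SRC_REG(dev);	/* the _BAD form would not compile here */
}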
*/ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -2061,10 +2061,13 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; if (param->vht_capable) { - mcs->rx_max_rate = param->rx_max_rate; - mcs->rx_mcs_set = param->rx_mcs_set; - mcs->tx_max_rate = param->tx_max_rate; - mcs->tx_mcs_set = param->tx_mcs_set; + /* firmware interprets mcs->tx_mcs_set field as peer's + * RX capability + */ + mcs->tx_max_rate = param->rx_max_rate; + mcs->tx_mcs_set = param->rx_mcs_set; + mcs->rx_max_rate = param->tx_max_rate; + mcs->rx_mcs_set = param->tx_mcs_set; } /* HE Rates */ @@ -2088,8 +2091,11 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE); - he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i]; - he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i]; + /* firmware interprets mcs->rx_mcs_set field as peer's + * RX capability + */ + he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; ptr += sizeof(*he_mcs); } diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 9fcffaa2f383..0f0de24a3840 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef ATH11K_WMI_H @@ -3463,20 +3463,6 @@ struct scan_cancel_param { u32 pdev_id; }; -struct wmi_bcn_send_from_host_cmd { - u32 tlv_header; - u32 vdev_id; - u32 data_len; - union { - u32 frag_ptr; - u32 frag_ptr_lo; - }; - u32 frame_ctrl; - u32 dtim_flag; - u32 bcn_antenna; - u32 frag_ptr_hi; -}; - #define WMI_CHAN_INFO_MODE GENMASK(5, 0) #define WMI_CHAN_INFO_HT40_PLUS BIT(6) #define WMI_CHAN_INFO_PASSIVE BIT(7) @@ -4133,8 +4119,10 @@ struct wmi_rate_set { struct wmi_vht_rate_set { u32 tlv_header; u32 rx_max_rate; + /* MCS at which the peer can transmit */ u32 rx_mcs_set; u32 tx_max_rate; + /* MCS at which the peer can receive */ u32 tx_mcs_set; u32 tx_max_mcs_nss; } __packed; diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 5d494c5cdc0d..cc352eef1939 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
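The ath11k peer-assoc hunks above, together with the earlier mac.c changes, straighten out which side's capability lands in which firmware field: firmware reads tx_mcs_set as the peer's RX capability (it bounds what we transmit), so the host now fills the tx_* fields from the peer's RX MCS map and vice versa, and applies the driver's configured MCS mask to the peer's RX map rather than its TX map. In the shape the wmi.c hunk gives it:

/* firmware field <- host-side peer capability */
mcs->tx_max_rate = param->rx_max_rate;	/* peer RX cap bounds our TX */
mcs->tx_mcs_set  = param->rx_mcs_set;
mcs->rx_max_rate = param->tx_max_rate;	/* peer TX cap bounds our RX */
mcs->rx_mcs_set  = param->tx_mcs_set;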
*/ @@ -1250,7 +1249,6 @@ void ath12k_fw_stats_reset(struct ath12k *ar) spin_lock_bh(&ar->data_lock); ath12k_fw_stats_free(&ar->fw_stats); ar->fw_stats.num_vdev_recvd = 0; - ar->fw_stats.num_bcn_recvd = 0; spin_unlock_bh(&ar->data_lock); } @@ -2106,14 +2104,27 @@ static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag) ret = ath12k_core_soc_create(ab); if (ret) { mutex_unlock(&ab->core_lock); - ath12k_err(ab, "failed to create soc core: %d\n", ret); - return ret; + ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret); + goto destroy; } mutex_unlock(&ab->core_lock); } return 0; + +destroy: + for (i--; i >= 0; i--) { + ab = ag->ab[i]; + if (!ab) + continue; + + mutex_lock(&ab->core_lock); + ath12k_core_soc_destroy(ab); + mutex_unlock(&ab->core_lock); + } + + return ret; } void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag) @@ -2188,7 +2199,7 @@ int ath12k_core_init(struct ath12k_base *ab) if (ret) { mutex_unlock(&ag->mutex); ath12k_warn(ab, "unable to create hw group\n"); - goto err_destroy_hw_group; + goto err_unassign_hw_group; } } @@ -2196,8 +2207,7 @@ int ath12k_core_init(struct ath12k_base *ab) return 0; -err_destroy_hw_group: - ath12k_core_hw_group_destroy(ab->ag); +err_unassign_hw_group: ath12k_core_hw_group_unassign(ab); err_unregister_notifier: ath12k_core_panic_notifier_unregister(ab); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 3d1956966a48..3c1e0069be1e 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -355,6 +355,8 @@ struct ath12k_link_vif { struct wmi_vdev_install_key_arg group_key; bool pairwise_key_done; u16 num_stations; + bool is_csa_in_progress; + struct wiphy_work bcn_tx_work; }; struct ath12k_vif { @@ -644,7 +646,6 @@ struct ath12k_fw_stats { struct list_head vdevs; struct list_head bcn; u32 num_vdev_recvd; - u32 num_bcn_recvd; }; struct ath12k_dbg_htt_stats { @@ -963,6 +964,7 @@ struct ath12k_device_dp_stats { u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX]; u32 tx_enqueued[DP_TCL_NUM_RING_MAX]; u32 tx_completed[DP_TCL_NUM_RING_MAX]; + u32 reo_excep_msdu_buf_type; }; struct ath12k_reg_freq { diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index 16601a8c3644..d6a86f075d73 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
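ath12k_core_hw_group_create() above now unwinds partial failure: if soc creation fails for device i, the destroy: path walks back over devices i-1..0 and destroys each under its core_lock, and ath12k_core_init() correspondingly stops destroying the whole group on failure, only unassigning the current device (the group may still hold other live devices). The walk-back idiom in isolation, with hypothetical types and soc_create()/soc_destroy() helpers:

struct soc_dev;
int soc_create(struct soc_dev *d);
void soc_destroy(struct soc_dev *d);

struct dev_group { int num; struct soc_dev *dev[8]; };

static int group_create(struct dev_group *g)
{
	int i, ret;

	for (i = 0; i < g->num; i++) {
		ret = soc_create(g->dev[i]);
		if (ret)
			goto destroy;
	}
	return 0;

destroy:
	/* undo only what succeeded, in reverse order */
	while (--i >= 0)
		soc_destroy(g->dev[i]);
	return ret;
}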
*/ #include "core.h" @@ -1178,6 +1178,9 @@ static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file, len += scnprintf(buf + len, size - len, "\n"); } + len += scnprintf(buf + len, size - len, "\nREO excep MSDU buf type:%u\n", + device_stats->reo_excep_msdu_buf_type); + len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n"); for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) { @@ -1283,6 +1286,7 @@ static int ath12k_open_vdev_stats(struct inode *inode, struct file *file) ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, buf); + ath12k_fw_stats_reset(ar); file->private_data = no_free_ptr(buf); @@ -1349,12 +1353,7 @@ static int ath12k_open_bcn_stats(struct inode *inode, struct file *file) ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, buf); - /* since beacon stats request is looped for all active VDEVs, saved fw - * stats is not freed for each request until done for all active VDEVs - */ - spin_lock_bh(&ar->data_lock); - ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn); - spin_unlock_bh(&ar->data_lock); + ath12k_fw_stats_reset(ar); file->private_data = no_free_ptr(buf); @@ -1415,6 +1414,7 @@ static int ath12k_open_pdev_stats(struct inode *inode, struct file *file) ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, buf); + ath12k_fw_stats_reset(ar); file->private_data = no_free_ptr(buf); diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 009c49502148..39d1967584db 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include "dp_mon.h" @@ -105,7 +105,7 @@ static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vh if (ppdu_info->is_stbc && nsts > 0) nsts = ((nsts + 1) >> 1) - 1; - ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK); + ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1; ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); @@ -129,7 +129,7 @@ static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC); ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); - ppdu_info->nss = (ppdu_info->mcs >> 3); + ppdu_info->nss = (ppdu_info->mcs >> 3) + 1; } static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, @@ -233,7 +233,9 @@ ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *of value = value << HE_STA_ID_SHIFT; ppdu_info->he_data4 |= value; - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); + ppdu_info->nss = + u32_get_bits(info0, + HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1; ppdu_info->beamformed = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); } @@ -261,7 +263,9 @@ ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b value = value << HE_STA_ID_SHIFT; ppdu_info->he_data4 |= value; - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS); + ppdu_info->nss = + u32_get_bits(info0, + HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1; } static void @@ -553,7 +557,8 @@ static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info * ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC); ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF); dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + ppdu_info->nss = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1; ppdu_info->dcm = dcm; } @@ -2179,7 +2184,7 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, spin_unlock_bh(&ar->data_lock); rxs->flag |= RX_FLAG_MACTIME_START; - rxs->nss = ppduinfo->nss + 1; + rxs->nss = ppduinfo->nss; if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ar->ab->wmi_ab.svc_map)) rxs->signal = ppduinfo->rssi_comb; diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 5e5c14a70316..d28d8ffec0f8 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
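The dp_mon hunks above fix an off-by-one convention: the NSTS/MCS-derived fields in the PPDU TLVs encode (streams - 1), and previously the raw value was stored in ppdu_info->nss with a compensating "+ 1" applied only in the radiotap path, so any other consumer saw a zero-based count. Each parser now stores the real stream count and the radiotap adjustment is dropped; schematically, with NSTS_MASK standing in for the per-preamble mask:

/* NSTS field encodes (space-time streams - 1); store an actual count */
ppdu_info->nss = u32_get_bits(info0, NSTS_MASK) + 1;

/* ... later, no further adjustment needed: */
rxs->nss = ppdu_info->nss;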
*/ #include <linux/ieee80211.h> @@ -1089,6 +1089,8 @@ static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp, { struct dp_reo_update_rx_queue_elem *elem; + lockdep_assert_held(&dp->ab->base_lock); + elem = kzalloc(sizeof(*elem), GFP_ATOMIC); if (!elem) return -ENOMEM; @@ -3781,6 +3783,50 @@ exit: return 0; } +static int ath12k_dp_h_msdu_buffer_type(struct ath12k_base *ab, + struct list_head *list, + struct hal_reo_dest_ring *desc) +{ + struct ath12k_rx_desc_info *desc_info; + struct ath12k_skb_rxcb *rxcb; + struct sk_buff *msdu; + u64 desc_va; + + ab->device_stats.reo_excep_msdu_buf_type++; + + desc_va = (u64)le32_to_cpu(desc->buf_va_hi) << 32 | + le32_to_cpu(desc->buf_va_lo); + desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va; + if (!desc_info) { + u32 cookie; + + cookie = le32_get_bits(desc->buf_addr_info.info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + desc_info = ath12k_dp_get_rx_desc(ab, cookie); + if (!desc_info) { + ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", + cookie); + return -EINVAL; + } + } + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) { + ath12k_warn(ab, "rx exception, magic check failed with value: %u\n", + desc_info->magic); + return -EINVAL; + } + + msdu = desc_info->skb; + desc_info->skb = NULL; + list_add_tail(&desc_info->list, list); + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + dev_kfree_skb_any(msdu); + + return 0; +} + int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { @@ -3825,6 +3871,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, drop = false; ab->device_stats.err_ring_pkts++; + hw_link_id = le32_get_bits(reo_desc->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + device_id = hw_links[hw_link_id].device_id; + partner_ab = ath12k_ag_to_ab(ag, device_id); + + /* Below case is added to handle data packet from un-associated clients. + * As it is expected that AST lookup will fail for + * un-associated station's data packets. + */ + if (le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) == + HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) { + if (!ath12k_dp_h_msdu_buffer_type(partner_ab, + &rx_desc_used_list[device_id], + reo_desc)) { + num_buffs_reaped[device_id]++; + tot_n_bufs_reaped++; + } + goto next_desc; + } + ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, &desc_bank); if (ret) { @@ -3833,11 +3899,6 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, continue; } - hw_link_id = le32_get_bits(reo_desc->info0, - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); - device_id = hw_links[hw_link_id].device_id; - partner_ab = ath12k_ag_to_ab(ag, device_id); - pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, hw_links[hw_link_id].pdev_idx); ar = partner_ab->pdevs[pdev_id].ar; @@ -3886,6 +3947,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, } } +next_desc: if (tot_n_bufs_reaped >= quota) { tot_n_bufs_reaped = quota; goto exit; diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c index 669096278fdd..c4443ca05cd6 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.c +++ b/drivers/net/wireless/ath/ath12k/hal_rx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
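ath12k_dp_h_msdu_buffer_type() above recovers the host descriptor either from the 64-bit virtual address split across two ring words or, failing that, from the SW cookie, and validates a magic value before unmapping and dropping the frame. A sketch of the address recombination and sanity check (the magic constant is a placeholder, not ATH12K_DP_RX_DESC_MAGIC's value):

#include <stdint.h>
#include <stddef.h>

#define RX_DESC_MAGIC 0xABCD1234u	/* placeholder value */

struct rx_desc_info {
	uint32_t magic;
	void *skb;
};

/* The REO ring reports the descriptor VA as two little-endian 32-bit
 * words; recombine, then sanity-check before trusting the pointer. */
static struct rx_desc_info *desc_from_ring(uint32_t buf_va_hi,
					   uint32_t buf_va_lo)
{
	uint64_t va = ((uint64_t)buf_va_hi << 32) | buf_va_lo;
	struct rx_desc_info *desc = (struct rx_desc_info *)(uintptr_t)va;

	if (!desc || desc->magic != RX_DESC_MAGIC)
		return NULL;	/* fall back to cookie lookup / reject */
	return desc;
}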
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include "debug.h" @@ -323,7 +323,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, { enum hal_reo_dest_ring_push_reason push_reason; enum hal_reo_dest_ring_error_code err_code; - u32 cookie, val; + u32 cookie; push_reason = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_PUSH_REASON); @@ -338,12 +338,6 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, return -EINVAL; } - val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE); - if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) { - ath12k_warn(ab, "expected buffer type link_desc"); - return -EINVAL; - } - ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie); *desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index db351c922018..f7a2a544bef2 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -533,6 +533,30 @@ ath12k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX]) return 1; } +static u32 +ath12k_mac_max_eht_nss(const u16 eht_mcs_mask[NL80211_EHT_NSS_MAX]) +{ + int nss; + + for (nss = NL80211_EHT_NSS_MAX - 1; nss >= 0; nss--) + if (eht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u32 +ath12k_mac_max_eht_mcs_nss(const u8 *eht_mcs, int eht_mcs_set_size) +{ + int i; + u8 nss = 0; + + for (i = 0; i < eht_mcs_set_size; i++) + nss = max(nss, u8_get_bits(eht_mcs[i], IEEE80211_EHT_MCS_NSS_RX)); + + return nss; +} + static u8 ath12k_parse_mpdudensity(u8 mpdudensity) { /* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing": @@ -2249,7 +2273,6 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, struct cfg80211_chan_def def; enum nl80211_band band; u16 *vht_mcs_mask; - u16 tx_mcs_map; u8 ampdu_factor; u8 max_nss, vht_mcs; int i, vht_nss, nss_idx; @@ -2340,10 +2363,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, arg->peer_nss = min(link_sta->rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); - arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); + arg->rx_mcs_set = ath12k_peer_assoc_h_vht_limit(arg->rx_mcs_set, vht_mcs_mask); - tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); - arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask); + arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); + arg->tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); /* In QCN9274 platform, VHT MCS rate 10 and 11 is enabled by default. * VHT MCS rate 10 and 11 is not supported in 11ac standard. 
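The two new helpers answer the same question from different inputs: ath12k_mac_max_eht_nss() scans the user's per-NSS MCS mask top-down, while ath12k_mac_max_eht_mcs_nss() takes the maximum RX NSS across raw MCS-NSS support octets. A self-contained version of both (NL80211_EHT_NSS_MAX is 16; RX NSS sits in the low nibble of each octet):

#include <stdint.h>

#define EHT_NSS_MAX	16
#define EHT_MCS_NSS_RX	0x0F

/* Highest NSS with any MCS bit enabled; 1 when the mask is empty. */
static uint32_t max_eht_nss(const uint16_t mcs_mask[EHT_NSS_MAX])
{
	int nss;

	for (nss = EHT_NSS_MAX - 1; nss >= 0; nss--)
		if (mcs_mask[nss])
			return nss + 1;
	return 1;
}

/* Maximum RX NSS over a run of MCS-NSS support octets. */
static uint8_t max_eht_mcs_nss(const uint8_t *octets, int len)
{
	uint8_t nss = 0;
	int i;

	for (i = 0; i < len; i++)
		if ((octets[i] & EHT_MCS_NSS_RX) > nss)
			nss = octets[i] & EHT_MCS_NSS_RX;
	return nss;
}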
@@ -2625,9 +2648,10 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_160: v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; - v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask); + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; arg->peer_he_mcs_count++; @@ -2637,10 +2661,10 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, default: v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); - v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; arg->peer_he_mcs_count++; @@ -3004,6 +3028,18 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, return MODE_UNKNOWN; } +static bool +ath12k_peer_assoc_h_eht_masked(const u16 eht_mcs_mask[NL80211_EHT_NSS_MAX]) +{ + int nss; + + for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) + if (eht_mcs_mask[nss]) + return false; + + return true; +} + static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ath12k_link_sta *arsta, @@ -3015,6 +3051,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; + const u16 *eht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -3029,6 +3066,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; + eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs; link_sta = ath12k_mac_get_link_sta(arsta); if (!link_sta) { @@ -3039,7 +3077,8 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, switch (band) { case NL80211_BAND_2GHZ: - if (link_sta->eht_cap.has_eht) { + if (link_sta->eht_cap.has_eht && + !ath12k_peer_assoc_h_eht_masked(eht_mcs_mask)) { if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11BE_EHT40_2G; else @@ -3102,37 +3141,50 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, WARN_ON(phymode == MODE_UNKNOWN); } +#define ATH12K_EHT_MCS_7_ENABLED 0x00FF +#define ATH12K_EHT_MCS_9_ENABLED 0x0300 +#define ATH12K_EHT_MCS_11_ENABLED 0x0C00 +#define ATH12K_EHT_MCS_13_ENABLED 0x3000 + static void ath12k_mac_set_eht_mcs(u8 rx_tx_mcs7, u8 rx_tx_mcs9, u8 rx_tx_mcs11, u8 rx_tx_mcs13, - u32 *rx_mcs, u32 *tx_mcs) -{ - *rx_mcs = 0; - u32p_replace_bits(rx_mcs, - u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX), - WMI_EHT_MCS_NSS_0_7); - u32p_replace_bits(rx_mcs, - u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX), - WMI_EHT_MCS_NSS_8_9); - u32p_replace_bits(rx_mcs, - u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX), - WMI_EHT_MCS_NSS_10_11); - u32p_replace_bits(rx_mcs, - u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX), - WMI_EHT_MCS_NSS_12_13); - - *tx_mcs = 0; - u32p_replace_bits(tx_mcs, - u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX), - WMI_EHT_MCS_NSS_0_7); - u32p_replace_bits(tx_mcs, - u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX), - WMI_EHT_MCS_NSS_8_9); - u32p_replace_bits(tx_mcs, - u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX), - 
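The HE hunks above fix an ordering bug: the user's MCS mask is now applied to the peer's RX map before it is stored, and the TX map is re-read from the capability instead of reusing the already-limited RX value. A sketch of the corrected flow, with a deliberately simplified stand-in limiter:

#include <stdint.h>

/* Stand-in for ath12k_peer_assoc_h_he_limit(); the real per-NSS
 * intersection is more involved than a plain AND. */
static uint16_t he_limit(uint16_t mcs_map, uint16_t user_mask)
{
	return mcs_map & user_mask;
}

static void fill_he_mcs(uint16_t peer_rx_map, uint16_t peer_tx_map,
			uint16_t user_mask,
			uint16_t *arg_rx, uint16_t *arg_tx)
{
	*arg_rx = he_limit(peer_rx_map, user_mask); /* limit, then store */
	*arg_tx = peer_tx_map;	/* re-read; don't reuse the limited RX */
}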
WMI_EHT_MCS_NSS_10_11); - u32p_replace_bits(tx_mcs, - u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX), - WMI_EHT_MCS_NSS_12_13); + u32 *rx_mcs, u32 *tx_mcs, + const u16 eht_mcs_limit[NL80211_EHT_NSS_MAX]) +{ + int nss; + u8 mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0; + u8 peer_mcs_7, peer_mcs_9, peer_mcs_11, peer_mcs_13; + + for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) { + if (eht_mcs_limit[nss] & ATH12K_EHT_MCS_7_ENABLED) + mcs_7++; + if (eht_mcs_limit[nss] & ATH12K_EHT_MCS_9_ENABLED) + mcs_9++; + if (eht_mcs_limit[nss] & ATH12K_EHT_MCS_11_ENABLED) + mcs_11++; + if (eht_mcs_limit[nss] & ATH12K_EHT_MCS_13_ENABLED) + mcs_13++; + } + + peer_mcs_7 = u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX); + peer_mcs_9 = u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX); + peer_mcs_11 = u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX); + peer_mcs_13 = u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX); + + *rx_mcs = u32_encode_bits(min(peer_mcs_7, mcs_7), WMI_EHT_MCS_NSS_0_7) | + u32_encode_bits(min(peer_mcs_9, mcs_9), WMI_EHT_MCS_NSS_8_9) | + u32_encode_bits(min(peer_mcs_11, mcs_11), WMI_EHT_MCS_NSS_10_11) | + u32_encode_bits(min(peer_mcs_13, mcs_13), WMI_EHT_MCS_NSS_12_13); + + peer_mcs_7 = u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX); + peer_mcs_9 = u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX); + peer_mcs_11 = u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX); + peer_mcs_13 = u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX); + + *tx_mcs = u32_encode_bits(min(peer_mcs_7, mcs_7), WMI_EHT_MCS_NSS_0_7) | + u32_encode_bits(min(peer_mcs_9, mcs_9), WMI_EHT_MCS_NSS_8_9) | + u32_encode_bits(min(peer_mcs_11, mcs_11), WMI_EHT_MCS_NSS_10_11) | + u32_encode_bits(min(peer_mcs_13, mcs_13), WMI_EHT_MCS_NSS_12_13); } static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, @@ -3171,13 +3223,22 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, struct ath12k_wmi_peer_assoc_arg *arg) { struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + const struct ieee80211_eht_mcs_nss_supp *own_eht_mcs_nss_supp; const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; + const struct ieee80211_sta_eht_cap *eht_cap, *own_eht_cap; + const struct ieee80211_sband_iftype_data *iftd; const struct ieee80211_eht_mcs_nss_supp_bw *bw; - const struct ieee80211_sta_eht_cap *eht_cap; const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_link_sta *link_sta; struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def def; + bool user_rate_valid = true; + enum nl80211_band band; + int eht_nss, nss_idx; u32 *rx_mcs, *tx_mcs; + u16 *eht_mcs_mask; + u8 max_nss = 0; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -3199,6 +3260,22 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, if (!he_cap->has_he || !eht_cap->has_eht) return; + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) + return; + + band = def.chan->band; + eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs; + + iftd = ieee80211_get_sband_iftype_data(&ar->mac.sbands[band], vif->type); + if (!iftd) { + ath12k_warn(ar->ab, + "unable to access iftype_data in struct ieee80211_supported_band\n"); + return; + } + + own_eht_cap = &iftd->eht_cap; + own_eht_mcs_nss_supp = &own_eht_cap->eht_mcs_nss_supp; + arg->eht_flag = true; if ((eht_cap->eht_cap_elem.phy_cap_info[5] & @@ -3215,6 +3292,28 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, rx_mcs = arg->peer_eht_rx_mcs_set; tx_mcs = arg->peer_eht_tx_mcs_set; + eht_nss = 
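The rewritten ath12k_mac_set_eht_mcs() folds the 16-bit per-NSS user mask into an NSS count per EHT MCS group (0-7, 8-9, 10-11, 12-13) and then clamps the peer's advertised NSS per group with min(). The folding step in isolation, using the same group masks:

#include <stdint.h>

#define EHT_NSS_MAX	16
#define MCS_0_7		0x00FF
#define MCS_8_9		0x0300
#define MCS_10_11	0x0C00
#define MCS_12_13	0x3000

struct group_nss {
	uint8_t mcs7, mcs9, mcs11, mcs13;
};

/* Each NSS entry that still enables a group raises that group's NSS
 * ceiling; the caller min()'s the result with the peer's RX NSS. */
static struct group_nss fold_eht_mask(const uint16_t mask[EHT_NSS_MAX])
{
	struct group_nss lim = {0};
	int nss;

	for (nss = 0; nss < EHT_NSS_MAX; nss++) {
		if (mask[nss] & MCS_0_7)
			lim.mcs7++;
		if (mask[nss] & MCS_8_9)
			lim.mcs9++;
		if (mask[nss] & MCS_10_11)
			lim.mcs11++;
		if (mask[nss] & MCS_12_13)
			lim.mcs13++;
	}
	return lim;
}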
ath12k_mac_max_eht_mcs_nss((void *)own_eht_mcs_nss_supp, + sizeof(*own_eht_mcs_nss_supp)); + if (eht_nss > link_sta->rx_nss) { + user_rate_valid = false; + for (nss_idx = (link_sta->rx_nss - 1); nss_idx >= 0; nss_idx--) { + if (eht_mcs_mask[nss_idx]) { + user_rate_valid = true; + break; + } + } + } + + if (!user_rate_valid) { + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Setting eht range MCS value to peer supported nss %d for peer %pM\n", + link_sta->rx_nss, arsta->addr); + eht_mcs_mask[link_sta->rx_nss - 1] = eht_mcs_mask[eht_nss - 1]; + } + + bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz; + bw = &eht_cap->eht_mcs_nss_supp.bw._80; + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_320: bw = &eht_cap->eht_mcs_nss_supp.bw._320; @@ -3223,7 +3322,8 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, bw->rx_tx_mcs11_max_nss, bw->rx_tx_mcs13_max_nss, &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320], - &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320]); + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320], + eht_mcs_mask); arg->peer_eht_mcs_count++; fallthrough; case IEEE80211_STA_RX_BW_160: @@ -3233,15 +3333,13 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, bw->rx_tx_mcs11_max_nss, bw->rx_tx_mcs13_max_nss, &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160], - &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160]); + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160], + eht_mcs_mask); arg->peer_eht_mcs_count++; fallthrough; default: - if ((he_cap->he_cap_elem.phy_cap_info[0] & - (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) { + if (!(link_sta->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz; ath12k_mac_set_eht_mcs(bw_20->rx_tx_mcs7_max_nss, @@ -3249,7 +3347,8 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, bw_20->rx_tx_mcs11_max_nss, bw_20->rx_tx_mcs13_max_nss, &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], - &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], + eht_mcs_mask); } else { bw = &eht_cap->eht_mcs_nss_supp.bw._80; ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, @@ -3257,7 +3356,8 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, bw->rx_tx_mcs11_max_nss, bw->rx_tx_mcs13_max_nss, &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], - &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], + eht_mcs_mask); } arg->peer_eht_mcs_count++; @@ -3266,6 +3366,41 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, arg->punct_bitmap = ~arvif->punct_bitmap; arg->eht_disable_mcs15 = link_conf->eht_disable_mcs15; + + if (!(link_sta->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + if (bw_20->rx_tx_mcs13_max_nss) + max_nss = max(max_nss, u8_get_bits(bw_20->rx_tx_mcs13_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + if (bw_20->rx_tx_mcs11_max_nss) + max_nss = max(max_nss, u8_get_bits(bw_20->rx_tx_mcs11_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + if (bw_20->rx_tx_mcs9_max_nss) + max_nss = max(max_nss, u8_get_bits(bw_20->rx_tx_mcs9_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + if (bw_20->rx_tx_mcs7_max_nss) + max_nss = max(max_nss, u8_get_bits(bw_20->rx_tx_mcs7_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + } else { + if (bw->rx_tx_mcs13_max_nss) + max_nss = max(max_nss, u8_get_bits(bw->rx_tx_mcs13_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + if (bw->rx_tx_mcs11_max_nss) + 
max_nss = max(max_nss, u8_get_bits(bw->rx_tx_mcs11_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + if (bw->rx_tx_mcs9_max_nss) + max_nss = max(max_nss, u8_get_bits(bw->rx_tx_mcs9_max_nss, + IEEE80211_EHT_MCS_NSS_RX)); + } + + max_nss = min(max_nss, (uint8_t)eht_nss); + + arg->peer_nss = min(link_sta->rx_nss, max_nss); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac eht peer %pM nss %d mcs cnt %d ru_punct_bitmap 0x%x\n", + arsta->addr, arg->peer_nss, arg->peer_eht_mcs_count, + arg->punct_bitmap); } static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta, @@ -3834,6 +3969,38 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } +static void ath12k_mac_bcn_tx_event(struct ath12k_link_vif *arvif) +{ + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_bss_conf *link_conf; + + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(arvif->ar->ab, "failed to get link conf for vdev %u\n", + arvif->vdev_id); + return; + } + + if (link_conf->color_change_active) { + if (ieee80211_beacon_cntdwn_is_complete(vif, arvif->link_id)) { + ieee80211_color_change_finish(vif, arvif->link_id); + return; + } + + ieee80211_beacon_update_cntdwn(vif, arvif->link_id); + ath12k_mac_setup_bcn_tmpl(arvif); + } +} + +static void ath12k_mac_bcn_tx_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif, + bcn_tx_work); + + lockdep_assert_wiphy(wiphy); + ath12k_mac_bcn_tx_event(arvif); +} + static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif, struct ath12k_link_vif *arvif, int link_id) { @@ -3863,6 +4030,7 @@ static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif, INIT_LIST_HEAD(&arvif->list); INIT_DELAYED_WORK(&arvif->connection_loss_work, ath12k_mac_vif_sta_connection_loss_work); + wiphy_work_init(&arvif->bcn_tx_work, ath12k_mac_bcn_tx_work); arvif->num_stations = 0; @@ -3875,6 +4043,8 @@ static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif, sizeof(arvif->bitrate_mask.control[i].vht_mcs)); memset(arvif->bitrate_mask.control[i].he_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].he_mcs)); + memset(arvif->bitrate_mask.control[i].eht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].eht_mcs)); } /* Handle MLO related assignments */ @@ -3900,6 +4070,7 @@ static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, lockdep_assert_wiphy(ah->hw->wiphy); cancel_delayed_work_sync(&arvif->connection_loss_work); + wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", arvif->vdev_id, arvif->link_id); @@ -4202,6 +4373,30 @@ static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif, chandef->chan->band == NL80211_BAND_6GHZ; } +static void ath12k_wmi_vdev_params_up(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ath12k_link_vif *tx_arvif, + struct ieee80211_bss_conf *info, u16 aid) +{ + struct ath12k_wmi_vdev_up_params params = { + .vdev_id = arvif->vdev_id, + .aid = aid, + .bssid = arvif->bssid + }; + int ret; + + if (tx_arvif) { + params.tx_bssid = tx_arvif->bssid; + params.nontx_profile_idx = info->bssid_index; + params.nontx_profile_cnt = 1 << info->bssid_indicator; + } + + ret = ath12k_wmi_vdev_up(arvif->ar, ¶ms); + if (ret) + ath12k_warn(ar->ab, "failed to bring vdev up %d: %d\n", + arvif->vdev_id, ret); +} + static void ath12k_mac_bss_info_changed(struct ath12k *ar, 
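ath12k_mac_bcn_tx_event() above drives the BSS color change countdown from beacon-transmit notifications: finish the change once the countdown completes, otherwise decrement it and push a refreshed beacon template. A sketch of that control flow, with simplified types standing in for the mac80211 helpers:

struct vif_state {
	int color_change_active;
	int cntdwn;
};

static void push_beacon_template(struct vif_state *v) { (void)v; }

static void color_change_finish(struct vif_state *v)
{
	v->color_change_active = 0;	/* switch to the new BSS color */
}

static void on_beacon_tx(struct vif_state *v)
{
	if (!v->color_change_active)
		return;

	if (v->cntdwn == 0) {		/* cntdwn_is_complete() step */
		color_change_finish(v);
		return;
	}

	v->cntdwn--;			/* beacon_update_cntdwn() step */
	push_beacon_template(v);	/* re-install updated template */
}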
struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info, @@ -4210,6 +4405,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, struct ath12k_vif *ahvif = arvif->ahvif; struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ieee80211_vif_cfg *vif_cfg = &vif->cfg; + struct ath12k_link_vif *tx_arvif; struct cfg80211_chan_def def; u32 param_id, param_value; enum nl80211_band band; @@ -4218,9 +4414,9 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, u32 preamble; u16 hw_value; u16 bitrate; - int ret; u8 rateidx; u32 rate; + int ret; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -4253,12 +4449,41 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, "Set burst beacon mode for VDEV: %d\n", arvif->vdev_id); + /* In MBSSID case, need to install transmitting VIF's template first */ + ret = ath12k_mac_setup_bcn_tmpl(arvif); if (ret) ath12k_warn(ar->ab, "failed to update bcn template: %d\n", ret); + + if (!arvif->is_csa_in_progress) + goto skip_vdev_up; + + tx_arvif = ath12k_mac_get_tx_arvif(arvif, info); + if (tx_arvif && arvif != tx_arvif && tx_arvif->is_csa_in_progress) + /* skip non tx vif's */ + goto skip_vdev_up; + + ath12k_wmi_vdev_params_up(ar, arvif, tx_arvif, info, ahvif->aid); + + arvif->is_csa_in_progress = false; + + if (tx_arvif && arvif == tx_arvif) { + struct ath12k_link_vif *arvif_itr; + + list_for_each_entry(arvif_itr, &ar->arvifs, list) { + if (!arvif_itr->is_csa_in_progress) + continue; + + ath12k_wmi_vdev_params_up(ar, arvif, tx_arvif, + info, ahvif->aid); + arvif_itr->is_csa_in_progress = false; + } + } } +skip_vdev_up: + if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { arvif->dtim_period = info->dtim_period; @@ -4475,8 +4700,25 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, ATH12K_BSS_COLOR_AP_PERIODS, info->he_bss_color.enabled); if (ret) - ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", + ath12k_warn(ar->ab, "failed to set bss color collision on vdev %u: %d\n", arvif->vdev_id, ret); + + param_id = WMI_VDEV_PARAM_BSS_COLOR; + if (info->he_bss_color.enabled) + param_value = info->he_bss_color.color << + IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET; + else + param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED; + + ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + param_value); + if (ret) + ath12k_warn(ar->ab, "failed to set bss color param on vdev %u: %d\n", + arvif->vdev_id, ret); + else + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "bss color param 0x%x set on vdev %u\n", + param_value, arvif->vdev_id); } else if (vif->type == NL80211_IFTYPE_STATION) { ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar, arvif->vdev_id, @@ -4837,8 +5079,6 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar, if (ah->state != ATH12K_HW_STATE_ON) return -ENETDOWN; - ath12k_fw_stats_reset(ar); - reinit_completion(&ar->fw_stats_complete); reinit_completion(&ar->fw_stats_done); @@ -4936,6 +5176,7 @@ static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw, ar->chan_tx_pwr = pdev->chan_tx_power / 2; spin_unlock_bh(&ar->data_lock); ar->last_tx_power_update = jiffies; + ath12k_fw_stats_reset(ar); send_tx_power: *dbm = ar->chan_tx_pwr; @@ -5059,7 +5300,8 @@ static int ath12k_mac_initiate_hw_scan(struct ieee80211_hw *hw, ret = ath12k_mac_vdev_create(ar, arvif); if (ret) { ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret); - return -EINVAL; + ath12k_mac_unassign_link_vif(arvif); + return ret; } } @@ -5719,6 +5961,20 @@ ath12k_mac_bitrate_mask_num_he_rates(struct ath12k *ar, } 
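When the BSS color changes on an AP vdev, the driver now also programs WMI_VDEV_PARAM_BSS_COLOR using the HE Operation encoding. Assuming mac80211's layout (6-bit color at bits 29:24, "color disabled" at bit 31), the value reduces to:

#include <stdint.h>

#define BSS_COLOR_OFFSET	24		/* assumed field offset */
#define BSS_COLOR_DISABLED	(1u << 31)	/* assumed disable flag */

static uint32_t bss_color_vdev_param(int enabled, uint8_t color)
{
	if (!enabled)
		return BSS_COLOR_DISABLED;
	return (uint32_t)(color & 0x3F) << BSS_COLOR_OFFSET;
}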
static int +ath12k_mac_bitrate_mask_num_eht_rates(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++) + num_rates += hweight16(mask->control[band].eht_mcs[i]); + + return num_rates; +} + +static int ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif, struct ath12k_link_sta *arsta, const struct cfg80211_bitrate_mask *mask, @@ -5818,6 +6074,65 @@ ath12k_mac_set_peer_he_fixed_rate(struct ath12k_link_vif *arvif, return ret; } +static int +ath12k_mac_set_peer_eht_fixed_rate(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, + const struct cfg80211_bitrate_mask *mask, + enum nl80211_band band) +{ + struct ath12k_sta *ahsta = arsta->ahsta; + struct ath12k *ar = arvif->ar; + struct ieee80211_sta *sta; + struct ieee80211_link_sta *link_sta; + u8 eht_rate, nss = 0; + u32 rate_code; + int ret, i; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + sta = ath12k_ahsta_to_sta(ahsta); + + for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++) { + if (hweight16(mask->control[band].eht_mcs[i]) == 1) { + nss = i + 1; + eht_rate = ffs(mask->control[band].eht_mcs[i]) - 1; + } + } + + if (!nss) { + ath12k_warn(ar->ab, "No single EHT Fixed rate found to set for %pM\n", + arsta->addr); + return -EINVAL; + } + + /* Avoid updating invalid nss as fixed rate*/ + link_sta = ath12k_mac_get_link_sta(arsta); + if (!link_sta || nss > link_sta->rx_nss) { + ath12k_warn(ar->ab, + "unable to access link sta for sta %pM link %u or fixed nss of %u is not supported by sta\n", + sta->addr, arsta->link_id, nss); + return -EINVAL; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "Setting Fixed EHT Rate for peer %pM. Device will not switch to any other selected rates\n", + arsta->addr); + + rate_code = ATH12K_HW_RATE_CODE(eht_rate, nss - 1, + WMI_RATE_PREAMBLE_EHT); + + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, + arvif->vdev_id, + WMI_PEER_PARAM_FIXED_RATE, + rate_code); + if (ret) + ath12k_warn(ar->ab, + "failed to update STA %pM Fixed Rate %d: %d\n", + arsta->addr, rate_code, ret); + + return ret; +} + static int ath12k_mac_station_assoc(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ath12k_link_sta *arsta, @@ -5830,7 +6145,7 @@ static int ath12k_mac_station_assoc(struct ath12k *ar, struct cfg80211_chan_def def; enum nl80211_band band; struct cfg80211_bitrate_mask *mask; - u8 num_vht_rates, num_he_rates; + u8 num_vht_rates, num_he_rates, num_eht_rates; u8 link_id = arvif->link_id; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -5873,10 +6188,11 @@ static int ath12k_mac_station_assoc(struct ath12k *ar, num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask); num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask); + num_eht_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask); - /* If single VHT/HE rate is configured (by set_bitrate_mask()), - * peer_assoc will disable VHT/HE. This is now enabled by a peer specific - * fixed param. + /* If single VHT/HE/EHT rate is configured (by set_bitrate_mask()), + * peer_assoc will disable VHT/HE/EHT. This is now enabled by a peer + * specific fixed param. * Note that all other rates and NSS will be disabled for this peer. 
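ath12k_mac_set_peer_eht_fixed_rate() treats "fixed rate" as exactly one bit set in one NSS entry of the mask: the array index gives the NSS, ffs() the MCS. A standalone extraction matching that scan (like the driver, the last matching entry wins):

#include <stdint.h>

#define EHT_NSS_MAX 16

static int eht_single_rate(const uint16_t mask[EHT_NSS_MAX],
			   uint8_t *nss, uint8_t *mcs)
{
	int i, found = 0;

	for (i = 0; i < EHT_NSS_MAX; i++) {
		if (__builtin_popcount(mask[i]) == 1) {
			*nss = i + 1;			/* 1-based NSS */
			*mcs = __builtin_ffs(mask[i]) - 1;
			found = 1;
		}
	}
	return found;	/* 0 -> no single fixed rate configured */
}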
*/ link_sta = ath12k_mac_get_link_sta(arsta); @@ -5896,6 +6212,10 @@ static int ath12k_mac_station_assoc(struct ath12k *ar, ret = ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band); if (ret) return ret; + } else if (link_sta->eht_cap.has_eht && num_eht_rates == 1) { + ret = ath12k_mac_set_peer_eht_fixed_rate(arvif, arsta, mask, band); + if (ret) + return ret; } /* Re-assoc is run only to update supported rates for given station. It @@ -5958,8 +6278,9 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; + const u16 *eht_mcs_mask; u32 changed, bw, nss, mac_nss, smps, bw_prev; - int err, num_vht_rates, num_he_rates; + int err, num_vht_rates, num_he_rates, num_eht_rates; const struct cfg80211_bitrate_mask *mask; enum wmi_phy_mode peer_phymode; struct ath12k_link_sta *arsta; @@ -5980,6 +6301,7 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; + eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs; spin_lock_bh(&ar->data_lock); @@ -5997,6 +6319,7 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask), ath12k_mac_max_vht_nss(vht_mcs_mask), ath12k_mac_max_he_nss(he_mcs_mask)); + mac_nss = max(mac_nss, ath12k_mac_max_eht_nss(eht_mcs_mask)); nss = min(nss, mac_nss); struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = @@ -6082,6 +6405,8 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) mask); num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask); + num_eht_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band, + mask); /* Peer_assoc_prepare will reject vht rates in * bitrate_mask if its not available in range format and @@ -6106,9 +6431,18 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) band); } else if (link_sta->he_cap.has_he && num_he_rates == 1) { ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band); + } else if (link_sta->eht_cap.has_eht && num_eht_rates == 1) { + err = ath12k_mac_set_peer_eht_fixed_rate(arvif, arsta, + mask, band); + if (err) { + ath12k_warn(ar->ab, + "failed to set peer EHT fixed rate for STA %pM ret %d\n", + arsta->addr, err); + return; + } } else { - /* If the peer is non-VHT/HE or no fixed VHT/HE rate - * is provided in the new bitrate mask we set the + /* If the peer is non-VHT/HE/EHT or no fixed VHT/HE/EHT + * rate is provided in the new bitrate mask we set the * other rates using peer_assoc command. 
Also clear * the peer fixed rate settings as it has higher proprity * than peer assoc @@ -9687,6 +10021,12 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created) return -EINVAL; + if (ar->num_created_vdevs >= TARGET_NUM_VDEVS(ab)) { + ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", + TARGET_NUM_VDEVS(ab)); + return -ENOSPC; + } + link_id = arvif->link_id; if (link_id < IEEE80211_MLD_MAX_NUM_LINKS) { @@ -10046,12 +10386,6 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, if (arvif->is_created) goto flush; - if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) { - ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", - TARGET_NUM_VDEVS(ab)); - goto unlock; - } - ret = ath12k_mac_vdev_create(ar, arvif); if (ret) { ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret); @@ -10852,9 +11186,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, int n_vifs) { struct ath12k_wmi_vdev_up_params params = {}; - struct ath12k_link_vif *arvif; struct ieee80211_bss_conf *link_conf; struct ath12k_base *ab = ar->ab; + struct ath12k_link_vif *arvif; struct ieee80211_vif *vif; struct ath12k_vif *ahvif; u8 link_id; @@ -10915,6 +11249,28 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, continue; } + ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif, + vifs[i].new_ctx->def); + if (ret) { + ath12k_warn(ar->ab, + "failed to update puncturing bitmap %02x and width %d: %d\n", + vifs[i].new_ctx->def.punctured, + vifs[i].new_ctx->def.width, ret); + continue; + } + + /* Defer VDEV bring-up during CSA to avoid installing stale + * beacon templates. The beacon content is updated only + * after CSA finalize, so we mark CSA in progress and skip + * VDEV_UP for now. It will be handled later in + * bss_info_changed(). + */ + if (link_conf->csa_active && + arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + arvif->is_csa_in_progress = true; + continue; + } + ret = ath12k_mac_setup_bcn_tmpl(arvif); if (ret) ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", @@ -10935,16 +11291,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, arvif->vdev_id, ret); continue; } - - ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif, - vifs[i].new_ctx->def); - if (ret) { - ath12k_warn(ar->ab, - "failed to update puncturing bitmap %02x and width %d: %d\n", - vifs[i].new_ctx->def.punctured, - vifs[i].new_ctx->def.width, ret); - continue; - } } /* Restart the internal monitor vdev on new channel */ @@ -11849,6 +12195,9 @@ ath12k_mac_has_single_legacy_rate(struct ath12k *ar, if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask)) return false; + if (ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask)) + return false; + return num_rates == 1; } @@ -11871,11 +12220,15 @@ ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar, { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + const struct ieee80211_sband_iftype_data *data; const struct ieee80211_sta_he_cap *he_cap; u16 he_mcs_map = 0; + u16 eht_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; u8 he_nss_mask = 0; + u8 eht_nss_mask = 0; + u8 mcs_nss_len; int i; /* No need to consider legacy here. 
Basic rates are always present @@ -11919,7 +12272,60 @@ ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar, return false; } - if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask) + data = ieee80211_get_sband_iftype_data(sband, vif->type); + + mcs_nss_len = ieee80211_eht_mcs_nss_size(&data->he_cap.he_cap_elem, + &data->eht_cap.eht_cap_elem, + false); + if (mcs_nss_len == 4) { + /* 20 MHz only STA case */ + const struct ieee80211_eht_mcs_nss_supp_20mhz_only *eht_mcs_nss = + &data->eht_cap.eht_mcs_nss_supp.only_20mhz; + if (eht_mcs_nss->rx_tx_mcs13_max_nss) + eht_mcs_map = 0x1fff; + else if (eht_mcs_nss->rx_tx_mcs11_max_nss) + eht_mcs_map = 0x07ff; + else if (eht_mcs_nss->rx_tx_mcs9_max_nss) + eht_mcs_map = 0x01ff; + else + eht_mcs_map = 0x007f; + } else { + const struct ieee80211_eht_mcs_nss_supp_bw *eht_mcs_nss; + + switch (mcs_nss_len) { + case 9: + eht_mcs_nss = &data->eht_cap.eht_mcs_nss_supp.bw._320; + break; + case 6: + eht_mcs_nss = &data->eht_cap.eht_mcs_nss_supp.bw._160; + break; + case 3: + eht_mcs_nss = &data->eht_cap.eht_mcs_nss_supp.bw._80; + break; + default: + return false; + } + + if (eht_mcs_nss->rx_tx_mcs13_max_nss) + eht_mcs_map = 0x1fff; + else if (eht_mcs_nss->rx_tx_mcs11_max_nss) + eht_mcs_map = 0x7ff; + else + eht_mcs_map = 0x1ff; + } + + for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++) { + if (mask->control[band].eht_mcs[i] == 0) + continue; + + if (mask->control[band].eht_mcs[i] < eht_mcs_map) + eht_nss_mask |= BIT(i); + else + return false; + } + + if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask || + ht_nss_mask != eht_nss_mask) return false; if (ht_nss_mask == 0) @@ -11967,7 +12373,8 @@ ath12k_mac_get_single_legacy_rate(struct ath12k *ar, } static int -ath12k_mac_set_fixed_rate_gi_ltf(struct ath12k_link_vif *arvif, u8 he_gi, u8 he_ltf) +ath12k_mac_set_fixed_rate_gi_ltf(struct ath12k_link_vif *arvif, u8 gi, u8 ltf, + u32 param) { struct ath12k *ar = arvif->ar; int ret; @@ -11975,47 +12382,54 @@ ath12k_mac_set_fixed_rate_gi_ltf(struct ath12k_link_vif *arvif, u8 he_gi, u8 he_ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); /* 0.8 = 0, 1.6 = 2 and 3.2 = 3. 
*/ - if (he_gi && he_gi != 0xFF) - he_gi += 1; + if (gi && gi != 0xFF) + gi += 1; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_SGI, he_gi); + WMI_VDEV_PARAM_SGI, gi); if (ret) { - ath12k_warn(ar->ab, "failed to set HE GI:%d, error:%d\n", - he_gi, ret); + ath12k_warn(ar->ab, "failed to set GI:%d, error:%d\n", + gi, ret); return ret; } - /* start from 1 */ - if (he_ltf != 0xFF) - he_ltf += 1; + + if (param == WMI_VDEV_PARAM_HE_LTF) { + /* HE values start from 1 */ + if (ltf != 0xFF) + ltf += 1; + } else { + /* EHT values start from 5 */ + if (ltf != 0xFF) + ltf += 4; + } ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_HE_LTF, he_ltf); + param, ltf); if (ret) { - ath12k_warn(ar->ab, "failed to set HE LTF:%d, error:%d\n", - he_ltf, ret); + ath12k_warn(ar->ab, "failed to set LTF:%d, error:%d\n", + ltf, ret); return ret; } return 0; } static int -ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 he_gi, u8 he_ltf) +ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 gi, u8 ltf) { struct ath12k *ar = arvif->ar; int ret; - u32 he_ar_gi_ltf; + u32 ar_gi_ltf; - if (he_gi != 0xFF) { - switch (he_gi) { - case NL80211_RATE_INFO_HE_GI_0_8: - he_gi = WMI_AUTORATE_800NS_GI; + if (gi != 0xFF) { + switch (gi) { + case ATH12K_RATE_INFO_GI_0_8: + gi = WMI_AUTORATE_800NS_GI; break; - case NL80211_RATE_INFO_HE_GI_1_6: - he_gi = WMI_AUTORATE_1600NS_GI; + case ATH12K_RATE_INFO_GI_1_6: + gi = WMI_AUTORATE_1600NS_GI; break; - case NL80211_RATE_INFO_HE_GI_3_2: - he_gi = WMI_AUTORATE_3200NS_GI; + case ATH12K_RATE_INFO_GI_3_2: + gi = WMI_AUTORATE_3200NS_GI; break; default: ath12k_warn(ar->ab, "Invalid GI\n"); @@ -12023,16 +12437,16 @@ ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 he_gi, u8 he_ } } - if (he_ltf != 0xFF) { - switch (he_ltf) { - case NL80211_RATE_INFO_HE_1XLTF: - he_ltf = WMI_HE_AUTORATE_LTF_1X; + if (ltf != 0xFF) { + switch (ltf) { + case ATH12K_RATE_INFO_1XLTF: + ltf = WMI_AUTORATE_LTF_1X; break; - case NL80211_RATE_INFO_HE_2XLTF: - he_ltf = WMI_HE_AUTORATE_LTF_2X; + case ATH12K_RATE_INFO_2XLTF: + ltf = WMI_AUTORATE_LTF_2X; break; - case NL80211_RATE_INFO_HE_4XLTF: - he_ltf = WMI_HE_AUTORATE_LTF_4X; + case ATH12K_RATE_INFO_4XLTF: + ltf = WMI_AUTORATE_LTF_4X; break; default: ath12k_warn(ar->ab, "Invalid LTF\n"); @@ -12040,15 +12454,15 @@ ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 he_gi, u8 he_ } } - he_ar_gi_ltf = he_gi | he_ltf; + ar_gi_ltf = gi | ltf; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_AUTORATE_MISC_CFG, - he_ar_gi_ltf); + ar_gi_ltf); if (ret) { ath12k_warn(ar->ab, - "failed to set HE autorate GI:%u, LTF:%u params, error:%d\n", - he_gi, he_ltf, ret); + "failed to set autorate GI:%u, LTF:%u params, error:%d\n", + gi, ltf, ret); return ret; } @@ -12069,14 +12483,16 @@ static u32 ath12k_mac_nlgi_to_wmigi(enum nl80211_txrate_gi gi) static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif, u32 rate, u8 nss, u8 sgi, u8 ldpc, - u8 he_gi, u8 he_ltf, bool he_fixed_rate) + u8 he_gi, u8 he_ltf, bool he_fixed_rate, + u8 eht_gi, u8 eht_ltf, + bool eht_fixed_rate) { struct ieee80211_bss_conf *link_conf; struct ath12k *ar = arvif->ar; + bool he_support, eht_support, gi_ltf_set = false; u32 vdev_param; u32 param_value; int ret; - bool he_support; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -12085,6 +12501,7 @@ static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif, return -EINVAL; he_support = link_conf->he_support; + 
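With EHT sharing the fixed-rate path, ath12k_mac_set_fixed_rate_gi_ltf() now takes the LTF vdev param as an argument and offsets the value per mode: HE LTF values start at 1, EHT LTF values at 5, and nonzero GI values shift by one (0.8us = 0, 1.6us = 2, 3.2us = 3). The translation, with 0xFF as the pass-through "auto" sentinel:

#include <stdint.h>

/* GI: 0.8us = 0, 1.6us = 2, 3.2us = 3 on the WMI side. */
static uint8_t wmi_fixed_gi(uint8_t gi)
{
	return (gi && gi != 0xFF) ? gi + 1 : gi;
}

/* HE LTF values start at 1, EHT LTF values at 5. */
static uint8_t wmi_fixed_ltf(uint8_t ltf, int is_he)
{
	if (ltf == 0xFF)
		return ltf;		/* auto: leave untouched */
	return is_he ? ltf + 1 : ltf + 4;
}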
eht_support = link_conf->eht_support; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x\n", @@ -12094,7 +12511,11 @@ static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif, "he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", he_gi, he_ltf, he_fixed_rate); - if (!he_support) { + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "eht_gi 0x%02x eht_ltf 0x%02x eht_fixed_rate %d\n", + eht_gi, eht_ltf, eht_fixed_rate); + + if (!he_support && !eht_support) { vdev_param = WMI_VDEV_PARAM_FIXED_RATE; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); @@ -12123,14 +12544,34 @@ static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif, return ret; } + if (eht_support) { + if (eht_fixed_rate) + ret = ath12k_mac_set_fixed_rate_gi_ltf(arvif, eht_gi, eht_ltf, + WMI_VDEV_PARAM_EHT_LTF); + else + ret = ath12k_mac_set_auto_rate_gi_ltf(arvif, eht_gi, eht_ltf); + + if (ret) { + ath12k_warn(ar->ab, + "failed to set EHT LTF/GI params %d/%d: %d\n", + eht_gi, eht_ltf, ret); + return ret; + } + gi_ltf_set = true; + } + if (he_support) { if (he_fixed_rate) - ret = ath12k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf); + ret = ath12k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf, + WMI_VDEV_PARAM_HE_LTF); else ret = ath12k_mac_set_auto_rate_gi_ltf(arvif, he_gi, he_ltf); if (ret) return ret; - } else { + gi_ltf_set = true; + } + + if (!gi_ltf_set) { vdev_param = WMI_VDEV_PARAM_SGI; param_value = ath12k_mac_nlgi_to_wmigi(sgi); ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, @@ -12195,6 +12636,38 @@ ath12k_mac_he_mcs_range_present(struct ath12k *ar, return true; } +static bool +ath12k_mac_eht_mcs_range_present(struct ath12k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + u16 eht_mcs; + int i; + + for (i = 0; i < NL80211_EHT_NSS_MAX; i++) { + eht_mcs = mask->control[band].eht_mcs[i]; + + switch (eht_mcs) { + case 0: + case BIT(8) - 1: + case BIT(10) - 1: + case BIT(12) - 1: + case BIT(14) - 1: + break; + case BIT(15) - 1: + case BIT(16) - 1: + case BIT(16) - BIT(14) - 1: + if (i != 0) + return false; + break; + default: + return false; + } + } + + return true; +} + static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { @@ -12249,15 +12722,16 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban const struct cfg80211_bitrate_mask *mask, unsigned int link_id) { - bool he_fixed_rate = false, vht_fixed_rate = false; - const u16 *vht_mcs_mask, *he_mcs_mask; + bool eht_fixed_rate = false, he_fixed_rate = false, vht_fixed_rate = false; + const u16 *vht_mcs_mask, *he_mcs_mask, *eht_mcs_mask; struct ieee80211_link_sta *link_sta; struct ath12k_peer *peer, *tmp; - u8 vht_nss, he_nss; + u8 vht_nss, he_nss, eht_nss; int ret = true; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; + eht_mcs_mask = mask->control[band].eht_mcs; if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1) vht_fixed_rate = true; @@ -12265,11 +12739,15 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1) he_fixed_rate = true; - if (!vht_fixed_rate && !he_fixed_rate) + if (ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask) == 1) + eht_fixed_rate = true; + + if (!vht_fixed_rate && !he_fixed_rate && !eht_fixed_rate) return true; vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask); he_nss = ath12k_mac_max_he_nss(he_mcs_mask); + eht_nss = 
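ath12k_mac_eht_mcs_range_present() accepts only contiguous "MCS 0..N" masks per NSS; patterns that include MCS 14/15 are valid only on the first NSS entry. Since BIT(n) - 1 sets the low n bits, the accepted values enumerate as:

#include <stdint.h>

static int eht_mask_is_range(uint16_t m, int nss_idx)
{
	switch (m) {
	case 0:
	case (1u << 8) - 1:			/* MCS 0-7  */
	case (1u << 10) - 1:			/* MCS 0-9  */
	case (1u << 12) - 1:			/* MCS 0-11 */
	case (1u << 14) - 1:			/* MCS 0-13 */
		return 1;
	case (1u << 15) - 1:			/* adds MCS 14 */
	case (1u << 16) - 1:			/* adds MCS 14-15 */
	case (1u << 16) - (1u << 14) - 1:	/* MCS 0-13 plus 15 */
		return nss_idx == 0;		/* first NSS entry only */
	default:
		return 0;
	}
}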
ath12k_mac_max_eht_nss(eht_mcs_mask); rcu_read_lock(); spin_lock_bh(&ar->ab->base_lock); @@ -12291,6 +12769,11 @@ ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band ban ret = false; goto exit; } + if (eht_fixed_rate && (!link_sta->eht_cap.has_eht || + link_sta->rx_nss < eht_nss)) { + ret = false; + goto exit; + } } } exit: @@ -12312,8 +12795,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; + const u16 *eht_mcs_mask; u8 he_ltf = 0; u8 he_gi = 0; + u8 eht_ltf = 0, eht_gi = 0; u32 rate; u8 nss, mac_nss; u8 sgi; @@ -12322,6 +12807,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, int ret; int num_rates; bool he_fixed_rate = false; + bool eht_fixed_rate = false; lockdep_assert_wiphy(hw->wiphy); @@ -12337,6 +12823,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ht_mcs_mask = mask->control[band].ht_mcs; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; + eht_mcs_mask = mask->control[band].eht_mcs; ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); sgi = mask->control[band].gi; @@ -12348,6 +12835,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, he_gi = mask->control[band].he_gi; he_ltf = mask->control[band].he_ltf; + eht_gi = mask->control[band].eht_gi; + eht_ltf = mask->control[band].eht_ltf; + /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it * requires passing at least one of used basic rates along with them. * Fixed rate setting across different preambles(legacy, HT, VHT) is @@ -12385,9 +12875,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ath12k_warn(ar->ab, "failed to update fixed rate settings due to mcs/nss incompatibility\n"); - mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask), - ath12k_mac_max_vht_nss(vht_mcs_mask), - ath12k_mac_max_he_nss(he_mcs_mask)); + mac_nss = max(max3(ath12k_mac_max_ht_nss(ht_mcs_mask), + ath12k_mac_max_vht_nss(vht_mcs_mask), + ath12k_mac_max_he_nss(he_mcs_mask)), + ath12k_mac_max_eht_nss(eht_mcs_mask)); nss = min_t(u32, ar->num_tx_chains, mac_nss); /* If multiple rates across different preambles are given @@ -12435,6 +12926,20 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ret = -EINVAL; goto out; } + + num_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band, + mask); + if (num_rates == 1) + eht_fixed_rate = true; + + if (!ath12k_mac_eht_mcs_range_present(ar, band, mask) && + num_rates > 1) { + ath12k_warn(ar->ab, + "Setting more than one EHT MCS Value in bitrate mask not supported\n"); + ret = -EINVAL; + goto out; + } + ieee80211_iterate_stations_mtx(hw, ath12k_mac_disable_peer_fixed_rate, arvif); @@ -12446,7 +12951,8 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, } ret = ath12k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi, - he_ltf, he_fixed_rate); + he_ltf, he_fixed_rate, eht_gi, eht_ltf, + eht_fixed_rate); if (ret) { ath12k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n", arvif->vdev_id, ret); @@ -12701,14 +13207,18 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, if (!signal && ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && - !(ath12k_mac_get_fw_stats(ar, ¶ms))) + !(ath12k_mac_get_fw_stats(ar, ¶ms))) { signal = arsta->rssi_beacon; + ath12k_fw_stats_reset(ar); + } params.stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) && ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && - !(ath12k_mac_get_fw_stats(ar, ¶ms))) + 
!(ath12k_mac_get_fw_stats(ar, ¶ms))) { ath12k_mac_put_chain_rssi(sinfo, arsta); + ath12k_fw_stats_reset(ar); + } spin_lock_bh(&ar->data_lock); noise_floor = ath12k_pdev_get_noise_floor(ar); @@ -12792,8 +13302,10 @@ static void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw, if (!signal && ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && - !(ath12k_mac_get_fw_stats(ar, ¶ms))) + !(ath12k_mac_get_fw_stats(ar, ¶ms))) { signal = arsta->rssi_beacon; + ath12k_fw_stats_reset(ar); + } if (signal) { link_sinfo->signal = @@ -12895,6 +13407,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, if (ret) { ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", ret); + ath12k_mac_unassign_link_vif(arvif); return ret; } } @@ -13894,6 +14407,11 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); + if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, + ab->wmi_ab.svc_map)) { + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR); + ieee80211_hw_set(hw, DETECTS_COLOR_COLLISION); + } wiphy->cipher_suites = cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index c05af40bd7a2..1f689e367c8a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef ATH12K_MAC_H @@ -84,6 +84,18 @@ enum ath12k_supported_bw { ATH12K_BW_320 = 4, }; +enum ath12k_gi { + ATH12K_RATE_INFO_GI_0_8, + ATH12K_RATE_INFO_GI_1_6, + ATH12K_RATE_INFO_GI_3_2, +}; + +enum ath12k_ltf { + ATH12K_RATE_INFO_1XLTF, + ATH12K_RATE_INFO_2XLTF, + ATH12K_RATE_INFO_4XLTF, +}; + struct ath12k_mac_get_any_chanctx_conf_arg { struct ath12k *ar; struct ieee80211_chanctx_conf *chanctx_conf; diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index c729d5526c75..a12c8379cb46 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include <linux/module.h> @@ -218,6 +218,19 @@ static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset) return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END); } +static void ath12k_pci_restore_window(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + + spin_lock_bh(&ab_pci->window_lock); + + iowrite32(WINDOW_ENABLE_BIT | ab_pci->register_window, + ab->mem + WINDOW_REG_ADDRESS); + ioread32(ab->mem + WINDOW_REG_ADDRESS); + + spin_unlock_bh(&ab_pci->window_lock); +} + static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) { u32 val, delay; @@ -242,6 +255,11 @@ static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); if (val == 0xffffffff) ath12k_warn(ab, "link down error during global reset\n"); + + /* Restore window register as its content is cleared during + * hardware global reset, such that it aligns with host cache. + */ + ath12k_pci_restore_window(ab); } static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab) @@ -1871,3 +1889,7 @@ void ath12k_pci_exit(void) { pci_unregister_driver(&ath12k_pci_driver); } + +/* firmware files */ +MODULE_FIRMWARE(ATH12K_FW_DIR "/QCN9274/hw2.0/*"); +MODULE_FIRMWARE(ATH12K_FW_DIR "/WCN7850/hw2.0/*"); diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 36325e62aa24..b7c48b6706df 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/elf.h> @@ -3114,9 +3114,10 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab) if (!m3_mem->vaddr) return; - dma_free_coherent(ab->dev, m3_mem->size, + dma_free_coherent(ab->dev, m3_mem->total_size, m3_mem->vaddr, m3_mem->paddr); m3_mem->vaddr = NULL; + m3_mem->total_size = 0; m3_mem->size = 0; } @@ -3152,7 +3153,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) /* In recovery/resume cases, M3 buffer is not freed, try to reuse that */ if (m3_mem->vaddr) { - if (m3_mem->size >= m3_len) + if (m3_mem->total_size >= m3_len) goto skip_m3_alloc; /* Old buffer is too small, free and reallocate */ @@ -3164,11 +3165,13 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) GFP_KERNEL); if (!m3_mem->vaddr) { ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n", - fw->size); + m3_len); ret = -ENOMEM; goto out; } + m3_mem->total_size = m3_len; + skip_m3_alloc: memcpy(m3_mem->vaddr, m3_data, m3_len); m3_mem->size = m3_len; @@ -3740,7 +3743,7 @@ static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, sq->sq_node = service->node; sq->sq_port = service->port; - ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)sq, sizeof(*sq), 0); if (ret) { ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret); diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index 4767d9a2e309..7a88268aa1e9 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 
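The qmi.c hunks separate "bytes allocated" (total_size) from "bytes in use" (size) for the M3 buffer: recovery reuses the allocation whenever it is large enough, and the free path must pass the allocated length. A userspace analogue of the reuse logic, with malloc/free standing in for the DMA-coherent calls:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct m3_mem {
	uint32_t total_size;	/* bytes allocated */
	uint32_t size;		/* bytes currently in use */
	void *vaddr;
};

static int m3_load(struct m3_mem *m, const void *data, uint32_t len)
{
	if (m->vaddr && m->total_size < len) {	/* too small: reallocate */
		free(m->vaddr);			/* dma_free_coherent() */
		m->vaddr = NULL;
		m->total_size = 0;
	}
	if (!m->vaddr) {
		m->vaddr = malloc(len);		/* dma_alloc_coherent() */
		if (!m->vaddr)
			return -1;
		m->total_size = len;
	}
	memcpy(m->vaddr, data, len);
	m->size = len;
	return 0;
}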
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef ATH12K_QMI_H @@ -120,6 +120,9 @@ struct target_info { }; struct m3_mem_region { + /* total memory allocated */ + u32 total_size; + /* actual memory being used */ u32 size; dma_addr_t paddr; void *vaddr; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index ff6b3d4ea820..be8b2943094f 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -14,6 +14,7 @@ #include <linux/uuid.h> #include <linux/time.h> #include <linux/of.h> +#include <linux/cleanup.h> #include "core.h" #include "debugfs.h" #include "debug.h" @@ -190,6 +191,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_11d_new_cc_event) }, [WMI_TAG_PER_CHAIN_RSSI_STATS] = { .min_len = sizeof(struct wmi_per_chain_rssi_stat_params) }, + [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { + .min_len = sizeof(struct wmi_obss_color_collision_event) }, }; __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) @@ -2367,10 +2370,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override); if (arg->vht_capable) { - mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate); - mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set); - mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate); - mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set); + /* Firmware interprets mcs->tx_mcs_set field as peer's + * RX capability + */ + mcs->rx_max_rate = cpu_to_le32(arg->tx_max_rate); + mcs->rx_mcs_set = cpu_to_le32(arg->tx_mcs_set); + mcs->tx_max_rate = cpu_to_le32(arg->rx_max_rate); + mcs->tx_mcs_set = cpu_to_le32(arg->rx_mcs_set); } /* HE Rates */ @@ -3848,6 +3854,58 @@ int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval, } static void +ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb) +{ + const struct wmi_obss_color_collision_event *ev; + struct ath12k_link_vif *arvif; + u32 vdev_id, evt_type; + u64 bitmap; + + const void **tb __free(kfree) = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ath12k_warn(ab, "failed to parse OBSS color collision tlv %ld\n", + PTR_ERR(tb)); + return; + } + + ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch OBSS color collision event\n"); + return; + } + + vdev_id = le32_to_cpu(ev->vdev_id); + evt_type = le32_to_cpu(ev->evt_type); + bitmap = le64_to_cpu(ev->obss_color_bitmap); + + guard(rcu)(); + + arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); + if (!arvif) { + ath12k_warn(ab, "no arvif found for vdev %u in OBSS color collision event\n", + vdev_id); + return; + } + + switch (evt_type) { + case WMI_BSS_COLOR_COLLISION_DETECTION: + ieee80211_obss_color_collision_notify(arvif->ahvif->vif, + bitmap, + arvif->link_id); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "obss color collision detected vdev %u event %d bitmap %016llx\n", + vdev_id, evt_type, bitmap); + break; + case WMI_BSS_COLOR_COLLISION_DISABLE: + case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + case 
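The peer-assoc hunk above encodes a perspective flip: the WMI rate-set fields are named from the firmware's side, so what firmware may transmit is bounded by the peer's RX capability. Host rx_* values therefore land in the command's tx_* fields and vice versa:

#include <stdint.h>

struct vht_rate_set {	/* field names as firmware reads them */
	uint32_t rx_max_rate, rx_mcs_set;
	uint32_t tx_max_rate, tx_mcs_set;
};

static void fill_vht_rate_set(struct vht_rate_set *cmd,
			      uint32_t peer_rx_max, uint32_t peer_rx_mcs,
			      uint32_t peer_tx_max, uint32_t peer_tx_mcs)
{
	cmd->rx_max_rate = peer_tx_max;	/* fw RX <- what the peer sends */
	cmd->rx_mcs_set  = peer_tx_mcs;
	cmd->tx_max_rate = peer_rx_max;	/* fw TX <- what the peer hears */
	cmd->tx_mcs_set  = peer_rx_mcs;
}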
WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: + break; + default: + ath12k_warn(ab, "unknown OBSS color collision event type %d\n", evt_type); + } +} + +static void ath12k_fill_band_to_mac_param(struct ath12k_base *soc, struct ath12k_wmi_pdev_band_arg *arg) { @@ -7011,12 +7069,26 @@ static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) { + struct ath12k_link_vif *arvif; + struct ath12k *ar; u32 vdev_id, tx_status; if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { ath12k_warn(ab, "failed to extract bcn tx status"); return; } + + guard(rcu)(); + + arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); + if (!arvif) { + ath12k_warn(ab, "invalid vdev %u in bcn tx status\n", + vdev_id); + return; + } + + ar = arvif->ar; + wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work); } static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) @@ -8017,8 +8089,6 @@ void ath12k_wmi_fw_stats_dump(struct ath12k *ar, buf[len - 1] = 0; else buf[len] = 0; - - ath12k_fw_stats_reset(ar); } static void @@ -8415,18 +8485,10 @@ static void ath12k_wmi_fw_stats_process(struct ath12k *ar, ath12k_warn(ab, "empty beacon stats"); return; } - /* Mark end until we reached the count of all started VDEVs - * within the PDEV - */ - if (ar->num_started_vdevs) - is_end = ((++ar->fw_stats.num_bcn_recvd) == - ar->num_started_vdevs); list_splice_tail_init(&stats->bcn, &ar->fw_stats.bcn); - - if (is_end) - complete(&ar->fw_stats_done); + complete(&ar->fw_stats_done); } } @@ -9874,6 +9936,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID: ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb); break; + case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: + ath12k_wmi_obss_color_collision_event(ab, skb); + break; /* add Unsupported events (rare) here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: @@ -9884,7 +9949,6 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) /* add Unsupported events (frequent) here */ case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: case WMI_MGMT_RX_FW_CONSUMED_EVENTID: - case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: /* debug might flood hence silently ignore (no-op) */ break; case WMI_PDEV_UTF_EVENTID: diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index a8c3190e8ad9..f99fced1610e 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #ifndef ATH12K_WMI_H @@ -223,15 +223,15 @@ enum WMI_HOST_WLAN_BAND { }; /* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command. - * Used only for HE auto rate mode. + * Used for HE and EHT auto rate mode. 
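In the collision event handled above, obss_color_bitmap is a 64-bit map with one bit per BSS color value (0-63); a set bit marks a color observed in an overlapping BSS, which the notification path reports so a replacement color can be chosen. Reading it is a one-liner:

#include <stdint.h>

/* Nonzero when `color` (0-63) was seen in an overlapping BSS. */
static int obss_color_in_use(uint64_t bitmap, uint8_t color)
{
	return (bitmap >> (color & 63)) & 1;
}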
*/ enum { - /* HE LTF related configuration */ - WMI_HE_AUTORATE_LTF_1X = BIT(0), - WMI_HE_AUTORATE_LTF_2X = BIT(1), - WMI_HE_AUTORATE_LTF_4X = BIT(2), + /* LTF related configuration */ + WMI_AUTORATE_LTF_1X = BIT(0), + WMI_AUTORATE_LTF_2X = BIT(1), + WMI_AUTORATE_LTF_4X = BIT(2), - /* HE GI related configuration */ + /* GI related configuration */ WMI_AUTORATE_400NS_GI = BIT(8), WMI_AUTORATE_800NS_GI = BIT(9), WMI_AUTORATE_1600NS_GI = BIT(10), @@ -1197,6 +1197,7 @@ enum wmi_tlv_vdev_param { WMI_VDEV_PARAM_SET_HEMU_MODE, WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003, WMI_VDEV_PARAM_SET_EHT_MU_MODE = 0x8005, + WMI_VDEV_PARAM_EHT_LTF, }; enum wmi_tlv_peer_flags { @@ -3609,20 +3610,6 @@ struct ath12k_wmi_scan_cancel_arg { u32 pdev_id; }; -struct wmi_bcn_send_from_host_cmd { - __le32 tlv_header; - __le32 vdev_id; - __le32 data_len; - union { - __le32 frag_ptr; - __le32 frag_ptr_lo; - }; - __le32 frame_ctrl; - __le32 dtim_flag; - __le32 bcn_antenna; - __le32 frag_ptr_hi; -}; - #define WMI_CHAN_INFO_MODE GENMASK(5, 0) #define WMI_CHAN_INFO_HT40_PLUS BIT(6) #define WMI_CHAN_INFO_PASSIVE BIT(7) @@ -4218,8 +4205,10 @@ struct wmi_unit_test_cmd { struct ath12k_wmi_vht_rate_set_params { __le32 tlv_header; __le32 rx_max_rate; + /* MCS at which the peer can transmit */ __le32 rx_mcs_set; __le32 tx_max_rate; + /* MCS at which the peer can receive */ __le32 tx_mcs_set; __le32 tx_max_mcs_nss; } __packed; @@ -4940,6 +4929,24 @@ struct wmi_obss_spatial_reuse_params_cmd { #define ATH12K_BSS_COLOR_STA_PERIODS 10000 #define ATH12K_BSS_COLOR_AP_PERIODS 5000 +/** + * enum wmi_bss_color_collision - Event types for BSS color collision handling + * @WMI_BSS_COLOR_COLLISION_DISABLE: Indicates that BSS color collision detection + * is disabled. + * @WMI_BSS_COLOR_COLLISION_DETECTION: Event triggered when a BSS color collision + * is detected. + * @WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: Event indicating that the timer for waiting + * on a free BSS color slot has expired. + * @WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: Event indicating that a free BSS color slot + * has become available. 
+ */ +enum wmi_bss_color_collision { + WMI_BSS_COLOR_COLLISION_DISABLE = 0, + WMI_BSS_COLOR_COLLISION_DETECTION, + WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY, + WMI_BSS_COLOR_FREE_SLOT_AVAILABLE, +}; + struct wmi_obss_color_collision_cfg_params_cmd { __le32 tlv_header; __le32 vdev_id; @@ -4957,6 +4964,12 @@ struct wmi_bss_color_change_enable_params_cmd { __le32 enable; } __packed; +struct wmi_obss_color_collision_event { + __le32 vdev_id; + __le32 evt_type; + __le64 obss_color_bitmap; +} __packed; + #define ATH12K_IPV4_TH_SEED_SIZE 5 #define ATH12K_IPV6_TH_SEED_SIZE 11 diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c index dce9bd0bcaef..e8481626f194 100644 --- a/drivers/net/wireless/ath/ath12k/wow.c +++ b/drivers/net/wireless/ath/ath12k/wow.c @@ -758,6 +758,7 @@ static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable) if (ret) { ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n", arvif->vdev_id, enable, ret); + kfree(offload); return ret; } } diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index d3a9d00e65e1..ef9ea4ff891b 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -4484,80 +4484,6 @@ struct set_rssi_filter_resp { u32 status; }; -/* Update scan params - sent from host to PNO to be used during PNO - * scanningx */ -struct wcn36xx_hal_update_scan_params_req { - - struct wcn36xx_hal_msg_header header; - - /* Host setting for 11d */ - u8 dot11d_enabled; - - /* Lets PNO know that host has determined the regulatory domain */ - u8 dot11d_resolved; - - /* Channels on which PNO is allowed to scan */ - u8 channel_count; - u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS]; - - /* Minimum channel time */ - u16 active_min_ch_time; - - /* Maximum channel time */ - u16 active_max_ch_time; - - /* Minimum channel time */ - u16 passive_min_ch_time; - - /* Maximum channel time */ - u16 passive_max_ch_time; - - /* Cb State */ - enum phy_chan_bond_state state; -} __packed; - -/* Update scan params - sent from host to PNO to be used during PNO - * scanningx */ -struct wcn36xx_hal_update_scan_params_req_ex { - - struct wcn36xx_hal_msg_header header; - - /* Host setting for 11d */ - u8 dot11d_enabled; - - /* Lets PNO know that host has determined the regulatory domain */ - u8 dot11d_resolved; - - /* Channels on which PNO is allowed to scan */ - u8 channel_count; - u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX]; - - /* Minimum channel time */ - u16 active_min_ch_time; - - /* Maximum channel time */ - u16 active_max_ch_time; - - /* Minimum channel time */ - u16 passive_min_ch_time; - - /* Maximum channel time */ - u16 passive_max_ch_time; - - /* Cb State */ - enum phy_chan_bond_state state; -} __packed; - -/* Update scan params - sent from host to PNO to be used during PNO - * scanningx */ -struct wcn36xx_hal_update_scan_params_resp { - - struct wcn36xx_hal_msg_header header; - - /* status of the request */ - u32 status; -} __packed; - struct wcn36xx_hal_set_tx_per_tracking_req_msg { struct wcn36xx_hal_msg_header header; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 2cf86fc3f8fe..136acc414714 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1127,66 +1127,6 @@ out_nomem: return ret; } -static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) -{ - struct wcn36xx_hal_update_scan_params_resp *rsp; - - rsp = buf; - - /* Remove the 
PNO version bit */ - rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK)); - - if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) { - wcn36xx_warn("error response from update scan\n"); - return rsp->status; - } - - return 0; -} - -int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, - u8 *channels, size_t channel_count) -{ - struct wcn36xx_hal_update_scan_params_req_ex msg_body; - int ret; - - mutex_lock(&wcn->hal_mutex); - INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ); - - msg_body.dot11d_enabled = false; - msg_body.dot11d_resolved = true; - - msg_body.channel_count = channel_count; - memcpy(msg_body.channels, channels, channel_count); - msg_body.active_min_ch_time = 60; - msg_body.active_max_ch_time = 120; - msg_body.passive_min_ch_time = 60; - msg_body.passive_max_ch_time = 110; - msg_body.state = PHY_SINGLE_CHANNEL_CENTERED; - - PREPARE_HAL_BUF(wcn->hal_buf, msg_body); - - wcn36xx_dbg(WCN36XX_DBG_HAL, - "hal update scan params channel_count %d\n", - msg_body.channel_count); - - ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); - if (ret) { - wcn36xx_err("Sending hal_update_scan_params failed\n"); - goto out; - } - ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf, - wcn->hal_rsp_len); - if (ret) { - wcn36xx_err("hal_update_scan_params response failed err=%d\n", - ret); - goto out; - } -out: - mutex_unlock(&wcn->hal_mutex); - return ret; -} - static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, struct ieee80211_vif *vif, void *buf, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 2c1ed9e570bf..4e39df5589b3 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -66,7 +66,6 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode, int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode, struct ieee80211_vif *vif); -int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct cfg80211_scan_request *req); int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn); diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index f521af575e9b..c866cfd144c7 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -458,6 +458,5 @@ void wil_pm_runtime_put(struct wil6210_priv *wil) { struct device *dev = wil_to_dev(wil); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index ca488931a33c..f0453f3f6ba6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -38,7 +38,6 @@ static const struct iwl_family_base_params iwl_22000_base = { .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .apmg_not_supported = true, .mac_addr_from_csr = 0x380, - .min_umac_error_event_table = 0x400000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 60 * 1024, .mon_smem_regs = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index b56574006ee0..3c844cd419e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -50,7 +50,6 @@ static const struct iwl_family_base_params iwl8000_base = { .smem_offset = IWL8260_SMEM_OFFSET, .smem_len = IWL8260_SMEM_LEN, 
.apmg_not_supported = true, - .min_umac_error_event_table = 0x800000, }; static const struct iwl_tt_params iwl8000_tt_params = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index ac1fa291cf2f..5872fc9b8caf 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -41,7 +41,6 @@ static const struct iwl_family_base_params iwl9000_base = { .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .apmg_not_supported = true, .mac_addr_from_csr = 0x380, - .min_umac_error_event_table = 0x800000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 92 * 1024, .nvm_hw_section_num = 10, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index ddf3d313da5a..582f61661062 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -33,7 +33,6 @@ static const struct iwl_family_base_params iwl_ax210_base = { .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .apmg_not_supported = true, .mac_addr_from_csr = 0x380, - .min_umac_error_event_table = 0x400000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 60 * 1024, .mon_smem_regs = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index 3e6206e739f6..d25445bd1e5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware core release supported */ -#define IWL_BZ_UCODE_CORE_MAX 99 +#define IWL_BZ_UCODE_CORE_MAX 101 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 100 @@ -38,7 +38,6 @@ static const struct iwl_family_base_params iwl_bz_base = { .smem_len = IWL_BZ_SMEM_LEN, .apmg_not_supported = true, .mac_addr_from_csr = 0x30, - .min_umac_error_event_table = 0xD0000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 60 * 1024, .mon_smem_regs = { @@ -90,6 +89,7 @@ const struct iwl_mac_cfg iwl_bz_mac_cfg = { .low_latency_xtal = true, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_bz_mac_cfg); const struct iwl_mac_cfg iwl_gl_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c index e53a785686c8..a279dcfd3083 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c @@ -9,7 +9,7 @@ #include "fw/api/txq.h" /* Highest firmware core release supported */ -#define IWL_DR_UCODE_CORE_MAX 99 +#define IWL_DR_UCODE_CORE_MAX 101 /* Lowest firmware API version supported */ #define IWL_DR_UCODE_API_MIN 100 @@ -33,7 +33,6 @@ static const struct iwl_family_base_params iwl_dr_base = { .smem_len = IWL_DR_SMEM_LEN, .apmg_not_supported = true, .mac_addr_from_csr = 0x30, - .min_umac_error_event_table = 0xD0000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 60 * 1024, .mon_smem_regs = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c index 456a666c8dfd..fd82050e33a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c @@ -19,6 +19,7 @@ .non_shared_ant = ANT_B, \ .vht_mu_mimo_supported = true, \ .uhb_supported = true, \ + .eht_supported = true, \ .num_rbds = IWL_NUM_RBDS_EHT, \ .nvm_ver = IWL_FM_NVM_VERSION, \ .nvm_type = 
IWL_NVM_EXT diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c index 483f21659eff..408b9850bd10 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c @@ -12,5 +12,6 @@ const char iwl_killer_bn1850i_name[] = "Killer(R) Wi-Fi 8 BN1850i 320MHz Wireless Network Adapter (BN201.NGW)"; const char iwl_bn201_name[] = "Intel(R) Wi-Fi 8 BN201"; +const char iwl_bn203_name[] = "Intel(R) Wi-Fi 8 BN203"; const char iwl_be221_name[] = "Intel(R) Wi-Fi 7 BE221"; const char iwl_be223_name[] = "Intel(R) Wi-Fi 7 BE223"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c index 97735175cb0e..b5803ea1eb78 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c @@ -4,8 +4,31 @@ */ #include "iwl-config.h" +/* NVM versions */ +#define IWL_WH_NVM_VERSION 0x0a1d + +#define IWL_DEVICE_WH \ + .ht_params = { \ + .stbc = true, \ + .ldpc = true, \ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | \ + BIT(NL80211_BAND_5GHZ), \ + }, \ + .led_mode = IWL_LED_RF_STATE, \ + .non_shared_ant = ANT_B, \ + .vht_mu_mimo_supported = true, \ + .uhb_supported = true, \ + .num_rbds = IWL_NUM_RBDS_EHT, \ + .nvm_ver = IWL_WH_NVM_VERSION, \ + .nvm_type = IWL_NVM_EXT + /* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */ +const struct iwl_rf_cfg iwl_rf_wh_non_eht = { + IWL_DEVICE_WH, + .eht_supported = false, +}; + const char iwl_killer_be1775s_name[] = "Killer(R) Wi-Fi 7 BE1775s 320MHz Wireless Network Adapter (BE211D2W)"; const char iwl_killer_be1775i_name[] = @@ -13,3 +36,4 @@ const char iwl_killer_be1775i_name[] = const char iwl_be211_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz"; const char iwl_be213_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; +const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index e9449b59114a..ee00b2af7a1d 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware core release supported */ -#define IWL_SC_UCODE_CORE_MAX 99 +#define IWL_SC_UCODE_CORE_MAX 101 /* Lowest firmware API version supported */ #define IWL_SC_UCODE_API_MIN 100 @@ -41,7 +41,6 @@ static const struct iwl_family_base_params iwl_sc_base = { .smem_len = IWL_SC_SMEM_LEN, .apmg_not_supported = true, .mac_addr_from_csr = 0x30, - .min_umac_error_event_table = 0xD0000, .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 60 * 1024, .mon_smem_regs = { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 20bc6671f4eb..06cece4ea6d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -151,6 +151,7 @@ union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev, * @mcc: output buffer (3 bytes) that will get the MCC * * This function tries to read the current MCC from ACPI if available. 
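The new IWL_DEVICE_WH macro above follows the usual iwlwifi pattern of packaging shared designated initializers so that a variant config can override a single field, as iwl_rf_wh_non_eht does with .eht_supported. For illustration only (hypothetical variant, not in the patch):

/* A config sharing the WH defaults but flipping one member. */
const struct iwl_rf_cfg iwl_rf_wh_example = {
	IWL_DEVICE_WH,
	.eht_supported = true,	/* override just this field */
};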
+ * Return: 0 on success, or a negative error code */ int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index ad5b95cad0bf..ea2ba4b4cb7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -88,7 +88,7 @@ struct iwl_imr_alive_info { __le32 enabled; } __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */ -struct iwl_alive_ntf_v6 { +struct iwl_alive_ntf_v7 { __le16 status; __le16 flags; struct iwl_lmac_alive lmac_data[2]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h index d130d4f85444..073f003bdc5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -98,7 +98,7 @@ struct iwl_cmd_header { } __packed; /** - * struct iwl_cmd_header_wide + * struct iwl_cmd_header_wide - wide command header * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index ddc84430d895..616f00a8b603 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2023-2024 Intel Corporation + * Copyright (C) 2023-2025 Intel Corporation * Copyright (C) 2013-2014, 2018-2019 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH @@ -52,7 +52,7 @@ struct iwl_bt_coex_cmd { } __packed; /* BT_COEX_CMD_API_S_VER_6 */ /** - * struct iwl_bt_coex_reduced_txp_update_cmd + * struct iwl_bt_coex_reduced_txp_update_cmd - reduced TX power command * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the * bits are the sta_id (value) */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 997b0c9ce984..8d64a271bb94 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -60,7 +60,7 @@ enum iwl_legacy_cmds { * @UCODE_ALIVE_NTFY: * Alive data from the firmware, as described in * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or - * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6. + * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v7. 
*/ UCODE_ALIVE_NTFY = 0x1, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index b1c6ee8ae2df..6a6e11a57dbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -124,6 +124,11 @@ enum iwl_data_path_subcmd_ids { BEACON_FILTER_IN_NOTIF = 0xF8, /** + * @PHY_AIR_SNIFFER_NOTIF: &struct iwl_rx_phy_air_sniffer_ntfy + */ + PHY_AIR_SNIFFER_NOTIF = 0xF9, + + /** * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification */ STA_PM_NOTIF = 0xFD, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 3173fa96cb48..b62f0687327a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -16,7 +16,7 @@ #define IWL_FW_INI_PRESET_DISABLE 0xff /** - * struct iwl_fw_ini_hcmd + * struct iwl_fw_ini_hcmd - debug configuration host command * * @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC * @group: the desired cmd group @@ -199,7 +199,7 @@ struct iwl_fw_ini_region_tlv { } __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */ /** - * struct iwl_fw_ini_debug_info_tlv + * struct iwl_fw_ini_debug_info_tlv - debug info TLV * * debug configuration name for a specific image * @@ -311,7 +311,7 @@ struct iwl_fw_ini_conf_set_tlv { } __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */ /** - * enum iwl_fw_ini_config_set_type + * enum iwl_fw_ini_config_set_type - configuration set type * * @IWL_FW_INI_CONFIG_SET_TYPE_INVALID: invalid config set * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: for PERIPHERY MAC configuration @@ -337,7 +337,7 @@ enum iwl_fw_ini_config_set_type { } __packed; /** - * enum iwl_fw_ini_allocation_id + * enum iwl_fw_ini_allocation_id - allocation ID * * @IWL_FW_INI_ALLOCATION_INVALID: invalid * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration @@ -356,7 +356,7 @@ enum iwl_fw_ini_allocation_id { }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ /** - * enum iwl_fw_ini_buffer_location + * enum iwl_fw_ini_buffer_location - buffer location * * @IWL_FW_INI_LOCATION_INVALID: invalid * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location @@ -373,7 +373,7 @@ enum iwl_fw_ini_buffer_location { }; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** - * enum iwl_fw_ini_region_type + * enum iwl_fw_ini_region_type - region type * * @IWL_FW_INI_REGION_INVALID: invalid * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs @@ -437,7 +437,7 @@ enum iwl_fw_ini_region_device_memory_subtype { }; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */ /** - * enum iwl_fw_ini_time_point + * enum iwl_fw_ini_time_point - time point type * * Hard coded time points in which the driver can send hcmd or perform dump * collection diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 0cf1e5124fba..61a850de26fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -421,7 +421,7 @@ struct iwl_dbgc1_info { } __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */ /** - * struct iwl_dbg_host_event_cfg_cmd + * struct iwl_dbg_host_event_cfg_cmd - host event config command * @enabled_severities: enabled severities */ struct iwl_dbg_host_event_cfg_cmd { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 33541f92c7c7..2ee3a48aa5df 100644 
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -1092,7 +1092,7 @@ struct iwl_tof_range_req_ap_entry { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ /** - * enum iwl_tof_response_mode + * enum iwl_tof_response_mode - TOF response mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon @@ -1108,7 +1108,7 @@ enum iwl_tof_response_mode { }; /** - * enum iwl_tof_initiator_flags + * enum iwl_tof_initiator_flags - TOF initiator flags * * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run * the algo on ant A+B, instead of only one of them. @@ -1409,7 +1409,7 @@ enum iwl_tof_range_request_status { }; /** - * enum iwl_tof_entry_status + * enum iwl_tof_entry_status - TOF entry status * * @IWL_TOF_ENTRY_SUCCESS: successful measurement. * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure. @@ -1856,7 +1856,7 @@ struct iwl_tof_mcsi_notif { } __packed; /** - * struct iwl_tof_range_abort_cmd + * struct iwl_tof_range_abort_cmd - TOF range abort command * @request_id: corresponds to a range request * @reserved: reserved */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index e90f3187e55c..4644fc1aa1ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -18,13 +18,8 @@ enum iwl_regulatory_and_nvm_subcmd_ids { /** * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd_v1, - * &struct iwl_lari_config_change_cmd_v2, - * &struct iwl_lari_config_change_cmd_v3, - * &struct iwl_lari_config_change_cmd_v4, - * &struct iwl_lari_config_change_cmd_v5, * &struct iwl_lari_config_change_cmd_v6, - * &struct iwl_lari_config_change_cmd_v7, - * &struct iwl_lari_config_change_cmd_v10 or + * &struct iwl_lari_config_change_cmd_v8, * &struct iwl_lari_config_change_cmd */ LARI_CONFIG_CHANGE = 0x1, @@ -565,74 +560,6 @@ struct iwl_lari_config_change_cmd_v1 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */ /** - * struct iwl_lari_config_change_cmd_v2 - change LARI configuration - * @config_bitmap: bit map of the config commands. each bit will trigger a - * different predefined FW config operation - * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets - */ -struct iwl_lari_config_change_cmd_v2 { - __le32 config_bitmap; - __le32 oem_uhb_allow_bitmap; -} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */ - -/** - * struct iwl_lari_config_change_cmd_v3 - change LARI configuration - * @config_bitmap: bit map of the config commands. each bit will trigger a - * different predefined FW config operation - * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets - * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs. - * For each supported country, a pair of regulatory override bit and 11ax mode exist - * in the bit field. - */ -struct iwl_lari_config_change_cmd_v3 { - __le32 config_bitmap; - __le32 oem_uhb_allow_bitmap; - __le32 oem_11ax_allow_bitmap; -} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */ - -/** - * struct iwl_lari_config_change_cmd_v4 - change LARI configuration - * @config_bitmap: Bitmap of the config commands. Each bit will trigger a - * different predefined FW config operation. - * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. - * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. 
There are two bits - * per country, one to indicate whether to override and the other to - * indicate the value to use. - * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits - * per country, one to indicate whether to override and the other to - * indicate allow/disallow unii4 channels. - */ -struct iwl_lari_config_change_cmd_v4 { - __le32 config_bitmap; - __le32 oem_uhb_allow_bitmap; - __le32 oem_11ax_allow_bitmap; - __le32 oem_unii4_allow_bitmap; -} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */ - -/** - * struct iwl_lari_config_change_cmd_v5 - change LARI configuration - * @config_bitmap: Bitmap of the config commands. Each bit will trigger a - * different predefined FW config operation. - * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. - * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits - * per country, one to indicate whether to override and the other to - * indicate the value to use. - * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits - * per country, one to indicate whether to override and the other to - * indicate allow/disallow unii4 channels. - * @chan_state_active_bitmap: Bitmap for overriding channel state to active. - * Each bit represents a country or region to activate, according to the BIOS - * definitions. - */ -struct iwl_lari_config_change_cmd_v5 { - __le32 config_bitmap; - __le32 oem_uhb_allow_bitmap; - __le32 oem_11ax_allow_bitmap; - __le32 oem_unii4_allow_bitmap; - __le32 chan_state_active_bitmap; -} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */ - -/** * struct iwl_lari_config_change_cmd_v6 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. @@ -659,8 +586,7 @@ struct iwl_lari_config_change_cmd_v6 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */ /** - * struct iwl_lari_config_change_cmd_v7 - change LARI configuration - * This structure is used also for lari cmd version 8 and 9. + * struct iwl_lari_config_change_cmd_v8 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. @@ -670,21 +596,19 @@ struct iwl_lari_config_change_cmd_v6 { * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. - * For LARI cmd version 4 to 8 - bits 0:3 are supported. - * For LARI cmd version 9 - bits 0:5 are supported. + * bit 0 - 3: supported. * @chan_state_active_bitmap: Bitmap to enable different bands per country * or region. * Each bit represents a country or region, and a band to activate * according to the BIOS definitions. - * For LARI cmd version 7 - bits 0:3 are supported. - * For LARI cmd version 8 - bits 0:4 are supported. + * bit 0 - 4: supported. * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. * Each bit represents a set of channels in a specific band that should be * disabled * @edt_bitmap: Bitmap of energy detection threshold table. * Disable/enable the EDT optimization method for different band. 
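With the v2-v5, v7 and v10 layouts removed, only the v1, v6, v8 and current (v12) layouts remain. Driver code typically sizes the LARI host command from the version the firmware advertises; a sketch under that assumption (helper and macro names as used elsewhere in iwlwifi, logic simplified):

static size_t example_lari_cmd_size(const struct iwl_fw *fw)
{
	u8 ver = iwl_fw_lookup_cmd_ver(fw,
				       WIDE_ID(REGULATORY_AND_NVM_GROUP,
					       LARI_CONFIG_CHANGE), 1);

	switch (ver) {
	case 12:
		return sizeof(struct iwl_lari_config_change_cmd);
	case 8:
		return sizeof(struct iwl_lari_config_change_cmd_v8);
	case 6:
		return sizeof(struct iwl_lari_config_change_cmd_v6);
	default:
		return sizeof(struct iwl_lari_config_change_cmd_v1);
	}
}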
*/ -struct iwl_lari_config_change_cmd_v7 { +struct iwl_lari_config_change_cmd_v8 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; @@ -693,48 +617,8 @@ struct iwl_lari_config_change_cmd_v7 { __le32 force_disable_channels_bitmap; __le32 edt_bitmap; } __packed; -/* LARI_CHANGE_CONF_CMD_S_VER_7 */ /* LARI_CHANGE_CONF_CMD_S_VER_8 */ -/* LARI_CHANGE_CONF_CMD_S_VER_9 */ -/** - * struct iwl_lari_config_change_cmd_v10 - change LARI configuration - * @config_bitmap: Bitmap of the config commands. Each bit will trigger a - * different predefined FW config operation. - * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. - * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits - * per country, one to indicate whether to override and the other to - * indicate the value to use. - * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits - * per country, one to indicate whether to override and the other to - * indicate allow/disallow unii4 channels. - * For LARI cmd version 10 - bits 0:5 are supported. - * @chan_state_active_bitmap: Bitmap to enable different bands per country - * or region. - * Each bit represents a country or region, and a band to activate - * according to the BIOS definitions. - * For LARI cmd version 10 - bits 0:4 are supported. - * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. - * Each bit represents a set of channels in a specific band that should be - * disabled - * @edt_bitmap: Bitmap of energy detection threshold table. - * Disable/enable the EDT optimization method for different band. - * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC. - * bit0: enable 320Mhz in Japan. - * bit1: enable 320Mhz in South Korea. - * bit 2 - 31: reserved. - */ -struct iwl_lari_config_change_cmd_v10 { - __le32 config_bitmap; - __le32 oem_uhb_allow_bitmap; - __le32 oem_11ax_allow_bitmap; - __le32 oem_unii4_allow_bitmap; - __le32 chan_state_active_bitmap; - __le32 force_disable_channels_bitmap; - __le32 edt_bitmap; - __le32 oem_320mhz_allow_bitmap; -} __packed; -/* LARI_CHANGE_CONF_CMD_S_VER_10 */ /** * struct iwl_lari_config_change_cmd - change LARI configuration @@ -747,14 +631,11 @@ struct iwl_lari_config_change_cmd_v10 { * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. - * For LARI cmd version 11 - bits 0:5 are supported. * @chan_state_active_bitmap: Bitmap to enable different bands per country * or region. * Each bit represents a country or region, and a band to activate * according to the BIOS definitions. - * For LARI cmd version 11 - bits 0:4 are supported. - * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are - * reserved. + * bit 0 - 6: supported. * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. 
* Each bit represents a set of channels in a specific band that should be * disabled @@ -781,12 +662,11 @@ struct iwl_lari_config_change_cmd { __le32 oem_320mhz_allow_bitmap; __le32 oem_11be_allow_bitmap; } __packed; -/* LARI_CHANGE_CONF_CMD_S_VER_11 */ /* LARI_CHANGE_CONF_CMD_S_VER_12 */ /* Activate UNII-1 (5.2GHz) for World Wide */ #define ACTIVATE_5G2_IN_WW_MASK BIT(4) -#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F +#define CHAN_STATE_ACTIVE_BITMAP_CMD_V8 0x1F #define CHAN_STATE_ACTIVE_BITMAP_CMD_V12 0x7F /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 5eb8d10678fd..535864e22626 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -620,7 +620,7 @@ struct iwl_sar_offset_mapping_cmd { } __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/ /** - * struct iwl_beacon_filter_cmd + * struct iwl_beacon_filter_cmd - beacon filter command * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon * to driver if delta in Energy values calculated for this and last @@ -762,7 +762,7 @@ enum iwl_6ghz_ap_type { }; /* PHY_AP_TYPE_API_E_VER_1 */ /** - * struct iwl_txpower_constraints_cmd + * struct iwl_txpower_constraints_cmd - TX power constraints command * AP_TX_POWER_CONSTRAINTS_CMD * Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels * @link_id: linkId @@ -786,4 +786,5 @@ struct iwl_txpower_constraints_cmd { __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE]; u8 reserved[3]; } __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_power_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index d751789998ac..3ed7e0807b90 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -262,6 +262,7 @@ enum iwl_rx_mpdu_reorder_data { }; enum iwl_rx_mpdu_phy_info { + IWL_RX_MPDU_PHY_EOF_INDICATION = BIT(0), IWL_RX_MPDU_PHY_AMPDU = BIT(5), IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), @@ -1041,4 +1042,289 @@ struct iwl_beacon_filter_notif { __le32 link_id; } __packed; /* BEACON_FILTER_IN_NTFY_API_S_VER_2 */ +union iwl_legacy_sig { +#define OFDM_RX_LEGACY_LENGTH 0x00000fff +#define OFDM_RX_RATE 0x0000f000 + __le32 ofdm; +#define CCK_CRFR_SHORT_PREAMBLE 0x00000040 + __le32 cck; +}; + +struct iwl_ht_sigs { +#define OFDM_RX_FRAME_HT_MCS 0x0000007f +#define OFDM_RX_FRAME_HT_BANDWIDTH 0x00000080 +#define OFDM_RX_FRAME_HT_LENGTH 0x03ffff00 + __le32 a1; + __le32 a2; +}; + +struct iwl_vht_sigs { +#define OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM 0x000007ff +#define OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM_VALID 0x80000000 + __le32 a0; + __le32 a1, a2; +}; + +struct iwl_he_sigs { +#define OFDM_RX_FRAME_HE_BEAM_CHANGE 0x00000001 +#define OFDM_RX_FRAME_HE_UL_FLAG 0x00000002 +#define OFDM_RX_FRAME_HE_MCS 0x0000003c +#define OFDM_RX_FRAME_HE_DCM 0x00000040 +#define OFDM_RX_FRAME_HE_BSS_COLOR 0x00001f80 +#define OFDM_RX_FRAME_HE_SPATIAL_REUSE 0x0001e000 +#define OFDM_RX_FRAME_HE_BANDWIDTH 0x00060000 +#define OFDM_RX_FRAME_HE_SU_EXT_BW10 0x00080000 +#define OFDM_RX_FRAME_HE_GI_LTF_TYPE 0x00700000 +#define OFDM_RX_FRAME_HE_NSTS 0x03800000 +#define OFDM_RX_FRAME_HE_PRMBL_PUNC_TYPE 0x0c000000 + __le32 a1; +#define OFDM_RX_FRAME_HE_TXOP_DURATION 0x0000007f +#define OFDM_RX_FRAME_HE_CODING 0x00000080 +#define OFDM_RX_FRAME_HE_CODING_EXTRA_SYM 
0x00000100 +#define OFDM_RX_FRAME_HE_STBC 0x00000200 +#define OFDM_RX_FRAME_HE_BF 0x00000400 +#define OFDM_RX_FRAME_HE_PRE_FEC_PAD_FACTOR 0x00001800 +#define OFDM_RX_FRAME_HE_PE_DISAMBIG 0x00002000 +#define OFDM_RX_FRAME_HE_DOPPLER 0x00004000 +#define OFDM_RX_FRAME_HE_TYPE 0x00038000 +#define OFDM_RX_FRAME_HE_MU_NUM_OF_SIGB_SYM_OR_USER_NUM 0x003c0000 +#define OFDM_RX_FRAME_HE_MU_SIGB_COMP 0x00400000 +#define OFDM_RX_FRAME_HE_MU_NUM_OF_LTF_SYM 0x03800000 + __le32 a2; +#define OFDM_RX_FRAME_HE_NUM_OF_DATA_SYM 0x000007ff +#define OFDM_RX_FRAME_HE_PE_DURATION 0x00003800 +#define OFDM_RX_FRAME_HE_NUM_OF_DATA_SYM_VALID 0x80000000 + __le32 a3; +#define OFDM_RX_FRAME_HE_SIGB_STA_ID_FOUND 0x00000001 +#define OFDM_RX_FRAME_HE_SIGB_STA_ID_INDX 0x0000000e +#define OFDM_RX_FRAME_HE_SIGB_NSTS 0x00000070 +#define OFDM_RX_FRAME_HE_SIGB_BF 0x00000080 +#define OFDM_RX_FRAME_HE_SIGB_MCS 0x00000f00 +#define OFDM_RX_FRAME_HE_SIGB_DCM 0x00001000 +#define OFDM_RX_FRAME_HE_SIGB_CODING 0x00002000 +#define OFDM_RX_FRAME_HE_SIGB_SPATIAL_CONFIG 0x0003c000 +#define OFDM_RX_FRAME_HE_SIGB_STA_RU 0x03fc0000 +#define OFDM_RX_FRAME_HE_SIGB_NUM_OF_SYM 0x3c000000 +#define OFDM_RX_FRAME_HE_SIGB_CRC_OK 0x40000000 + __le32 b; +/* index 0 */ +#define OFDM_RX_FRAME_HE_RU_ALLOC_0_A1 0x000000ff +#define OFDM_RX_FRAME_HE_RU_ALLOC_0_A2 0x0000ff00 +#define OFDM_RX_FRAME_HE_RU_ALLOC_0_B1 0x00ff0000 +#define OFDM_RX_FRAME_HE_RU_ALLOC_0_B2 0xff000000 +/* index 1 */ +#define OFDM_RX_FRAME_HE_RU_ALLOC_1_C1 0x000000ff +#define OFDM_RX_FRAME_HE_RU_ALLOC_1_C2 0x0000ff00 +#define OFDM_RX_FRAME_HE_RU_ALLOC_1_D1 0x00ff0000 +#define OFDM_RX_FRAME_HE_RU_ALLOC_1_D2 0xff000000 +/* index 2 */ +#define OFDM_RX_FRAME_HE_CENTER_RU_CC1 0x00000001 +#define OFDM_RX_FRAME_HE_CENTER_RU_CC2 0x00000002 +#define OFDM_RX_FRAME_HE_COMMON_CC1_CRC_OK 0x00000004 +#define OFDM_RX_FRAME_HE_COMMON_CC2_CRC_OK 0x00000008 + __le32 cmn[3]; +}; + +struct iwl_he_tb_sigs { +#define OFDM_RX_HE_TRIG_FORMAT 0x00000001 +#define OFDM_RX_HE_TRIG_BSS_COLOR 0x0000007e +#define OFDM_RX_HE_TRIG_SPATIAL_REUSE_1 0x00000780 +#define OFDM_RX_HE_TRIG_SPATIAL_REUSE_2 0x00007800 +#define OFDM_RX_HE_TRIG_SPATIAL_REUSE_3 0x00078000 +#define OFDM_RX_HE_TRIG_SPATIAL_REUSE_4 0x00780000 +#define OFDM_RX_HE_TRIG_BANDWIDTH 0x03000000 + __le32 a1; +#define OFDM_RX_HE_TRIG_TXOP_DURATION 0x0000007f +#define OFDM_RX_HE_TRIG_SIG2_RESERVED 0x0000ff80 +#define OFDM_RX_HE_TRIG_FORMAT_ERR 0x08000000 +#define OFDM_RX_HE_TRIG_BW_ERR 0x10000000 +#define OFDM_RX_HE_TRIG_LEGACY_LENGTH_ERR 0x20000000 +#define OFDM_RX_HE_TRIG_CRC_OK 0x40000000 + __le32 a2; +#define OFDM_UCODE_TRIG_BASE_RX_LGCY_LENGTH 0x00000fff +#define OFDM_UCODE_TRIG_BASE_RX_BANDWIDTH 0x00007000 +#define OFDM_UCODE_TRIG_BASE_PS160 0x00008000 +#define OFDM_UCODE_EHT_TRIG_CONTROL_CHANNEL 0x000f0000 + __le32 tb_rx0; +#define OFDM_UCODE_TRIG_BASE_RX_MCS 0x0000000f +#define OFDM_UCODE_TRIG_BASE_RX_DCM 0x00000010 +#define OFDM_UCODE_TRIG_BASE_RX_GI_LTF_TYPE 0x00000060 +#define OFDM_UCODE_TRIG_BASE_RX_NSTS 0x00000380 +#define OFDM_UCODE_TRIG_BASE_RX_CODING 0x00000400 +#define OFDM_UCODE_TRIG_BASE_RX_CODING_EXTRA_SYM 0x00000800 +#define OFDM_UCODE_TRIG_BASE_RX_STBC 0x00001000 +#define OFDM_UCODE_TRIG_BASE_RX_PRE_FEC_PAD_FACTOR 0x00006000 +#define OFDM_UCODE_TRIG_BASE_RX_PE_DISAMBIG 0x00008000 +#define OFDM_UCODE_TRIG_BASE_RX_DOPPLER 0x00010000 +#define OFDM_UCODE_TRIG_BASE_RX_RU 0x01fe0000 +#define OFDM_UCODE_TRIG_BASE_RX_RU_P80 0x00020000 +#define OFDM_UCODE_TRIG_BASE_RX_NUM_OF_LTF_SYM 0x0e000000 +#define OFDM_UCODE_TRIG_BASE_RX_LTF_PILOT_TYPE 0x10000000 
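These defines are plain 32-bit field masks over the little-endian sig words, so a consumer would extract fields with the bitfield helpers. A small sketch against the iwl_he_sigs layout completed above (function name hypothetical):

#include <linux/bitfield.h>

static void example_parse_he_siga(const struct iwl_he_sigs *sigs)
{
	u32 mcs = le32_get_bits(sigs->a1, OFDM_RX_FRAME_HE_MCS);
	u32 color = le32_get_bits(sigs->a1, OFDM_RX_FRAME_HE_BSS_COLOR);
	u32 nsts = le32_get_bits(sigs->a1, OFDM_RX_FRAME_HE_NSTS);

	pr_debug("HE MCS %u bss_color %u nsts %u\n", mcs, color, nsts);
}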
+#define OFDM_UCODE_TRIG_BASE_RX_LOWEST_SS_ALLOCATION 0xe0000000 + __le32 tb_rx1; +}; + +struct iwl_eht_sigs { +#define OFDM_RX_FRAME_ENHANCED_WIFI_VER_ID 0x00000007 +#define OFDM_RX_FRAME_ENHANCED_WIFI_BANDWIDTH 0x00000038 +#define OFDM_RX_FRAME_ENHANCED_WIFI_UL_FLAG 0x00000040 +#define OFDM_RX_FRAME_ENHANCED_WIFI_BSS_COLOR 0x00001f80 +#define OFDM_RX_FRAME_ENHANCED_WIFI_TXOP_DURATION 0x000fe000 +#define OFDM_RX_FRAME_EHT_USIG1_DISREGARD 0x01f00000 +#define OFDM_RX_FRAME_EHT_USIG1_VALIDATE 0x02000000 +#define OFDM_RX_FRAME_EHT_BW320_SLOT 0x04000000 +#define OFDM_RX_FRAME_EHT_TYPE 0x18000000 +#define OFDM_RX_FRAME_ENHANCED_ER_NO_STREAMS 0x20000000 + __le32 usig_a1; +#define OFDM_RX_FRAME_EHT_PPDU_TYPE 0x00000003 +#define OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B2 0x00000004 +#define OFDM_RX_FRAME_EHT_PUNC_CHANNEL 0x000000f8 +#define OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B8 0x00000100 +#define OFDM_RX_FRAME_EHT_SIG_MCS 0x00000600 +#define OFDM_RX_FRAME_EHT_SIG_SYM_NUM 0x0000f800 +#define OFDM_RX_FRAME_EHT_TRIG_SPATIAL_REUSE_1 0x000f0000 +#define OFDM_RX_FRAME_EHT_TRIG_SPATIAL_REUSE_2 0x00f00000 +#define OFDM_RX_FRAME_EHT_TRIG_USIG2_DISREGARD 0x1f000000 +#define OFDM_RX_FRAME_EHT_TRIG_NO_STREAMS 0x20000000 +#define OFDM_RX_USIG_CRC_OK 0x40000000 + __le32 usig_a2_eht; +#define OFDM_RX_FRAME_EHT_SPATIAL_REUSE 0x0000000f +#define OFDM_RX_FRAME_EHT_GI_LTF_TYPE 0x00000030 +#define OFDM_RX_FRAME_EHT_NUM_OF_LTF_SYM 0x000001c0 +#define OFDM_RX_FRAME_EHT_CODING_EXTRA_SYM 0x00000200 +#define OFDM_RX_FRAME_EHT_PRE_FEC_PAD_FACTOR 0x00000c00 +#define OFDM_RX_FRAME_EHT_PE_DISAMBIG 0x00001000 +#define OFDM_RX_FRAME_EHT_USIG_OVF_DISREGARD 0x0001e000 +#define OFDM_RX_FRAME_EHT_NUM_OF_USERS 0x000e0000 +#define OFDM_RX_FRAME_EHT_NSTS 0x00f00000 +#define OFDM_RX_FRAME_EHT_BF 0x01000000 +#define OFDM_RX_FRAME_EHT_USIG_OVF_NDP_DISREGARD 0x06000000 +#define OFDM_RX_FRAME_EHTSIG_COMM_CC1_CRC_OK 0x08000000 +#define OFDM_RX_FRAME_EHTSIG_COMM_CC2_CRC_OK 0x10000000 +#define OFDM_RX_FRAME_EHT_NON_VALID_RU_ALLOC 0x20000000 +#define OFDM_RX_FRAME_EHT_NO_STREAMS 0x40000000 + __le32 b1; +#define OFDM_RX_FRAME_EHT_MATCH_ID_FOUND 0x00000001 +#define OFDM_RX_FRAME_EHT_ID_INDX 0x0000000e +#define OFDM_RX_FRAME_EHT_MCS 0x000000f0 +#define OFDM_RX_FRAME_EHT_CODING 0x00000100 +#define OFDM_RX_FRAME_EHT_SPATIAL_CONFIG 0x00007e00 +#define OFDM_RX_FRAME_EHT_STA_RU 0x007f8000 +#define OFDM_RX_FRAME_EHT_STA_RU_P80 0x00008000 +#define OFDM_RX_FRAME_EHT_STA_RU_PS160 0x00800000 +#define OFDM_RX_FRAME_EHT_USER_FIELD_CRC_OK 0x40000000 + __le32 b2; +#define OFDM_RX_FRAME_EHT_NUM_OF_DATA_SYM 0x000007ff +#define OFDM_RX_FRAME_EHT_PE_DURATION 0x00003800 +#define OFDM_RX_FRAME_EHT_NUM_OF_DATA_SYM_VALID 0x80000000 + __le32 sig2; +#define OFDM_RX_FRAME_EHT_RU_ALLOC_0_A1 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_0_A2 0x0003fe00 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_0_A3 0x07fc0000 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_1_B1 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_1_B2 0x0003fe00 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_1_B3 0x07fc0000 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_2_C1 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_2_C2 0x0003fe00 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_2_C3 0x07fc0000 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_3_D1 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_3_D2 0x0003fe00 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_3_D3 0x07fc0000 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_4_A4 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_4_B4 0x0003fe00 +#define OFDM_RX_FRAME_EHT_RU_ALLOC_5_C4 0x000001ff +#define OFDM_RX_FRAME_EHT_RU_ALLOC_5_D4 0x0003fe00 + __le32 
cmn[6]; +#define OFDM_RX_FRAME_EHT_USER_FIELD_ID 0x000007ff + __le32 user_id; +}; + +struct iwl_eht_tb_sigs { + /* same as non-TB above */ + __le32 usig_a1, usig_a2_eht; + /* same as HE TB above */ + __le32 tb_rx0, tb_rx1; +}; + +struct iwl_uhr_sigs { + __le32 usig_a1, usig_a1_uhr, usig_a2_uhr, b1, b2; + __le32 sig2; + __le32 cmn[6]; + __le32 user_id; +}; + +struct iwl_uhr_tb_sigs { + __le32 usig_a1, usig_a2_uhr, tb_rx0, tb_rx1; +}; + +struct iwl_uhr_elr_sigs { + __le32 usig_a1, usig_a2_uhr; + __le32 uhr_sig_elr1, uhr_sig_elr2; +}; + +union iwl_sigs { + struct iwl_ht_sigs ht; + struct iwl_vht_sigs vht; + struct iwl_he_sigs he; + struct iwl_he_tb_sigs he_tb; + struct iwl_eht_sigs eht; + struct iwl_eht_tb_sigs eht_tb; + struct iwl_uhr_sigs uhr; + struct iwl_uhr_tb_sigs uhr_tb; + struct iwl_uhr_elr_sigs uhr_elr; +}; + +enum iwl_sniffer_status { + IWL_SNIF_STAT_PLCP_RX_OK = 0, + IWL_SNIF_STAT_AID_NOT_FOR_US = 1, + IWL_SNIF_STAT_PLCP_RX_LSIG_ERR = 2, + IWL_SNIF_STAT_PLCP_RX_SIGA_ERR = 3, + IWL_SNIF_STAT_PLCP_RX_SIGB_ERR = 4, + IWL_SNIF_STAT_UNEXPECTED_TB = 5, + IWL_SNIF_STAT_UNSUPPORTED_RATE = 6, + IWL_SNIF_STAT_UNKNOWN_ERROR = 7, +}; /* AIR_SNIFFER_STATUS_E_VER_1 */ + +enum iwl_sniffer_flags { + IWL_SNIF_FLAG_VALID_TB_RX = BIT(0), + IWL_SNIF_FLAG_VALID_RU = BIT(1), +}; /* AIR_SNIFFER_FLAGS_E_VER_1 */ + +/** + * struct iwl_rx_phy_air_sniffer_ntfy - air sniffer notification + * + * @status: &enum iwl_sniffer_status + * @flags: &enum iwl_sniffer_flags + * @reserved1: reserved + * @rssi_a: energy chain-A in negative dBm, measured at FINA time + * @rssi_b: energy chain-B in negative dBm, measured at FINA time + * @channel: channel number + * @band: band information, PHY_BAND_* + * @on_air_rise_time: GP2 at on air rise + * @frame_time: frame time in us + * @rate: RATE_MCS_* + * @bytecount: byte count for legacy and HT, otherwise number of symbols + * @legacy_sig: CCK signal information if %RATE_MCS_MOD_TYPE_MSK in @rate is + * %RATE_MCS_MOD_TYPE_CCK, otherwise OFDM signal information + * @sigs: PHY signal information, depending on %RATE_MCS_MOD_TYPE_MSK in @rate + * @reserved2: reserved + * + * Sent for every frame and before the normal RX command if data is included.
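A consumer of the notification defined just below would gate all further parsing on @status and on the validity bits in @flags, roughly as follows (handler name hypothetical, illustration only):

static void
example_air_sniffer_seen(const struct iwl_rx_phy_air_sniffer_ntfy *ntfy)
{
	if (ntfy->status != IWL_SNIF_STAT_PLCP_RX_OK)
		return;	/* PLCP/rate error: the sig words are not usable */

	if (ntfy->flags & IWL_SNIF_FLAG_VALID_TB_RX) {
		/* trigger-based words (tb_rx0/tb_rx1) carry real data */
	}
	if (ntfy->flags & IWL_SNIF_FLAG_VALID_RU) {
		/* RU allocation fields are valid */
	}
}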
+ */ +struct iwl_rx_phy_air_sniffer_ntfy { + u8 status; + u8 flags; + u8 reserved1[2]; + u8 rssi_a, rssi_b; + u8 channel, band; + __le32 on_air_rise_time; + __le32 frame_time; + /* note: MCS in rate is not valid for MU-VHT */ + __le32 rate; + __le32 bytecount; + union iwl_legacy_sig legacy_sig; + union iwl_sigs sigs; + __le32 reserved2; +}; /* RX_PHY_AIR_SNIFFER_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index f486d624500b..60f0a4924ddf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -129,7 +129,7 @@ struct iwl_scan_offload_profile { } __packed; /** - * struct iwl_scan_offload_profile_cfg_data + * struct iwl_scan_offload_profile_cfg_data - scan offload profile configs * @blocklist_len: length of blocklist * @num_profiles: num of profiles in the list * @match_notify: clients waiting for match found notification @@ -159,7 +159,7 @@ struct iwl_scan_offload_profile_cfg_v1 { } __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1-2*/ /** - * struct iwl_scan_offload_profile_cfg + * struct iwl_scan_offload_profile_cfg - scan offload profile config * @profiles: profiles to search for match * @data: the rest of the data for profile_cfg */ @@ -507,7 +507,7 @@ enum iwl_uhb_chan_cfg_flags { IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE = BIT(26), }; /** - * struct iwl_scan_dwell + * struct iwl_scan_dwell - scan dwell configuration * @active: default dwell time for active scan * @passive: default dwell time for passive scan * @fragmented: default dwell time for fragmented scan @@ -728,7 +728,7 @@ enum iwl_umac_scan_general_params_flags2 { }; /** - * struct iwl_scan_channel_cfg_umac + * struct iwl_scan_channel_cfg_umac - scan channel config * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. 
* @v1: command version 1 @@ -774,7 +774,7 @@ struct iwl_scan_channel_cfg_umac { } __packed; /** - * struct iwl_scan_umac_schedule + * struct iwl_scan_umac_schedule - scan schedule parameters * @interval: interval in seconds between scan iterations * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop * @reserved: for alignment and future use @@ -815,7 +815,7 @@ struct iwl_scan_req_umac_tail_v2 { } __packed; /** - * struct iwl_scan_umac_chan_param + * struct iwl_scan_umac_chan_param - scan channel parameters * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @reserved: for future use and alignment @@ -827,33 +827,37 @@ struct iwl_scan_umac_chan_param { } __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */ /** - * struct iwl_scan_req_umac + * struct iwl_scan_req_umac - scan request command * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags + * @reserved: reserved * @scan_start_mac_id: report the scan start TSF time according to this mac TSF - * @extended_dwell: dwell time for channels 1, 6 and 11 - * @active_dwell: dwell time for active scan per LMAC - * @passive_dwell: dwell time for passive scan per LMAC - * @fragmented_dwell: dwell time for fragmented passive scan - * @adwell_default_n_aps: for adaptive dwell the default number of APs + * @v1: version 1 command data + * @v6: version 6 command data + * @v7: version 7 command data + * @v8: version 8 command data + * @v9: version 9 command data + * @v1.extended_dwell: dwell time for channels 1, 6 and 11 + * @v1.active_dwell: dwell time for active scan per LMAC + * @v1.passive_dwell: dwell time for passive scan per LMAC + * @v1.fragmented_dwell: dwell time for fragmented passive scan + * @v7.adwell_default_n_aps: for adaptive dwell the default number of APs * per channel - * @adwell_default_n_aps_social: for adaptive dwell the default + * @v7.adwell_default_n_aps_social: for adaptive dwell the default * number of APs per social (1,6,11) channel - * @general_flags2: &enum iwl_umac_scan_general_flags2 - * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added - * to total scan time - * @max_out_time: max out of serving channel time, per LMAC - for CDB there - * are 2 LMACs - * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs - * @scan_priority: scan internal prioritization &enum iwl_scan_priority - * @num_of_fragments: Number of fragments needed for full coverage per band. + * @v8.general_flags2: &enum iwl_umac_scan_general_flags2 + * @v7.adwell_max_budget: for adaptive dwell the maximal budget of TU to be + * added to total scan time + * @v1.max_out_time: max out of serving channel time, per LMAC - for CDB + * there are 2 LMACs + * @v1.suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs + * @v1.scan_priority: scan internal prioritization &enum iwl_scan_priority + * @v8.num_of_fragments: Number of fragments needed for full coverage per band. * Relevant only for fragmented scan. 
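The v1/v6/v7/v8/v9 members documented here are overlapping layouts of the same command, so the sender fills exactly one of them according to the scan API version negotiated with the firmware. A sketch of that selection (version check simplified, dwell values illustrative only):

static void example_fill_umac_dwell(struct iwl_scan_req_umac *cmd,
				    u8 scan_ver)
{
	if (scan_ver >= 7) {
		/* adaptive-dwell capable firmware */
		cmd->v7.adwell_default_n_aps = 2;
		cmd->v7.adwell_default_n_aps_social = 10;
	} else {
		cmd->v1.active_dwell = 10;
		cmd->v1.passive_dwell = 110;
		cmd->v1.fragmented_dwell = 44;
	}
}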
- * @channel: &struct iwl_scan_umac_chan_param - * @reserved: for future use and alignment - * @reserved3: for future use and alignment - * @data: &struct iwl_scan_channel_cfg_umac and + * @v1.channel: &struct iwl_scan_umac_chan_param + * @v1.data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ struct iwl_scan_req_umac { @@ -939,7 +943,7 @@ struct iwl_scan_req_umac { #define IWL_SCAN_REQ_UMAC_SIZE_V1 36 /** - * struct iwl_scan_probe_params_v3 + * struct iwl_scan_probe_params_v3 - scan probe parameters * @preq: scan probe request params * @ssid_num: number of valid SSIDs in direct scan array * @short_ssid_num: number of valid short SSIDs in short ssid array @@ -961,7 +965,7 @@ struct iwl_scan_probe_params_v3 { } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */ /** - * struct iwl_scan_probe_params_v4 + * struct iwl_scan_probe_params_v4 - scan probe parameters * @preq: scan probe request params * @short_ssid_num: number of valid short SSIDs in short ssid array * @bssid_num: number of valid bssid in bssids array @@ -983,7 +987,7 @@ struct iwl_scan_probe_params_v4 { #define SCAN_MAX_NUM_CHANS_V3 67 /** - * struct iwl_scan_channel_params_v4 + * struct iwl_scan_channel_params_v4 - channel params * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @num_of_aps_override: override the number of APs the FW uses to calculate @@ -1006,7 +1010,7 @@ struct iwl_scan_channel_params_v4 { SCAN_CHANNEL_PARAMS_API_S_VER_5 */ /** - * struct iwl_scan_channel_params_v7 + * struct iwl_scan_channel_params_v7 - channel params * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @n_aps_override: override the number of APs the FW uses to calculate dwell @@ -1024,7 +1028,7 @@ struct iwl_scan_channel_params_v7 { } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */ /** - * struct iwl_scan_general_params_v11 + * struct iwl_scan_general_params_v11 - general scan parameters * @flags: &enum iwl_umac_scan_general_flags_v2 * @reserved: reserved for future * @scan_start_mac_or_link_id: report the scan start TSF time according to this @@ -1066,7 +1070,7 @@ struct iwl_scan_general_params_v11 { } __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_12, *_VER_11 and *_VER_10 */ /** - * struct iwl_scan_periodic_parms_v1 + * struct iwl_scan_periodic_parms_v1 - periodicity parameters * @schedule: scan scheduling parameter * @delay: initial delay of the periodic scan in seconds * @reserved: reserved for future @@ -1078,7 +1082,7 @@ struct iwl_scan_periodic_parms_v1 { } __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ /** - * struct iwl_scan_req_params_v12 + * struct iwl_scan_req_params_v12 - scan request parameters (v12) * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v4 * @periodic_params: &struct iwl_scan_periodic_parms_v1 @@ -1106,7 +1110,7 @@ struct iwl_scan_req_params_v17 { } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_17 - 14 */ /** - * struct iwl_scan_req_umac_v12 + * struct iwl_scan_req_umac_v12 - scan request command (v12) * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters @@ -1130,7 +1134,7 @@ struct iwl_scan_req_umac_v17 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_17 - 14 */ /** - * struct iwl_umac_scan_abort + * struct iwl_umac_scan_abort - scan abort command * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @flags: reserved */ @@ -1140,7 +1144,7 @@ struct iwl_umac_scan_abort { }
__packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ /** - * enum iwl_umac_scan_abort_status + * enum iwl_umac_scan_abort_status - scan abort status * * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress @@ -1153,7 +1157,7 @@ enum iwl_umac_scan_abort_status { }; /** - * struct iwl_umac_scan_complete + * struct iwl_umac_scan_complete - scan complete notification * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @last_schedule: last scheduling line * @last_iter: last scan iteration number diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index ecbcd5084cd8..e6f9abdfa546 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021, 2023, 2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -428,7 +428,7 @@ struct iwl_mvm_rm_sta_cmd { } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ /** - * struct iwl_mvm_mgmt_mcast_key_cmd_v1 + * struct iwl_mvm_mgmt_mcast_key_cmd_v1 - IGTK command * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK key material @@ -449,7 +449,7 @@ struct iwl_mvm_mgmt_mcast_key_cmd_v1 { } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ /** - * struct iwl_mvm_mgmt_mcast_key_cmd + * struct iwl_mvm_mgmt_mcast_key_cmd - IGTK command * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK master key diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 00713a991879..8d9a5058d5a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -26,7 +26,7 @@ struct mvm_statistics_div { } __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ /** - * struct mvm_statistics_rx_non_phy + * struct mvm_statistics_rx_non_phy - non-PHY RX statistics * @bogus_cts: CTS received when not expecting CTS * @bogus_ack: ACK received when not expecting ACK * @non_channel_beacons: beacons with our bss id but not on our serving channel @@ -456,7 +456,7 @@ struct iwl_system_statistics_cmd { } __packed; /* STATISTICS_FW_CMD_API_S_VER_1 */ /** - * enum iwl_fw_statistics_type + * enum iwl_fw_statistics_type - statistics type * * @FW_STATISTICS_OPERATIONAL: operational statistics * @FW_STATISTICS_PHY: phy statistics @@ -478,7 +478,7 @@ enum iwl_fw_statistics_type { #define IWL_STATISTICS_TYPE_MSK 0x7f /** - * struct iwl_statistics_ntfy_hdr + * struct iwl_statistics_ntfy_hdr - statistics notification header * * @type: struct type * @version: version of the struct @@ -491,7 +491,7 @@ struct iwl_statistics_ntfy_hdr { }; /* STATISTICS_NTFY_HDR_API_S_VER_1 */ /** - * struct iwl_stats_ntfy_per_link + * struct iwl_stats_ntfy_per_link - per-link statistics * * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. 
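Most hunks in these header files make the same mechanical fix: kernel-doc expects the opening line to carry a short description after the symbol name, in the form "struct <name> - <summary>", otherwise scripts/kernel-doc emits a missing-short-description warning. A minimal well-formed block for reference (example struct, not from the patch):

/**
 * struct example_stats - example statistics notification payload
 * @time_stamp: time when the notification is sent from firmware
 * @count: number of aggregated samples
 */
struct example_stats {
	__le32 time_stamp;
	__le32 count;
};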
@@ -514,7 +514,7 @@ struct iwl_stats_ntfy_per_link { } __packed; /* STATISTICS_NTFY_PER_LINK_API_S_VER_1 */ /** - * struct iwl_stats_ntfy_part1_per_link + * struct iwl_stats_ntfy_part1_per_link - part1 per link statistics * * @rx_time: rx time * @tx_time: tx time @@ -533,7 +533,7 @@ struct iwl_stats_ntfy_part1_per_link { } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_PER_LINK_API_S_VER_1 */ /** - * struct iwl_stats_ntfy_per_mac + * struct iwl_stats_ntfy_per_mac - per MAC statistics * * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. @@ -556,7 +556,8 @@ struct iwl_stats_ntfy_per_mac { } __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */ #define IWL_STATS_MAX_BW_INDEX 5 -/** struct iwl_stats_ntfy_per_phy +/** + * struct iwl_stats_ntfy_per_phy - per PHY statistics * @channel_load: channel load * @channel_load_by_us: device contribution to MCLM * @channel_load_not_by_us: other devices' contribution to MCLM @@ -588,7 +589,7 @@ struct iwl_stats_ntfy_per_phy { #define IWL_STATS_UNKNOWN_CHANNEL_LOAD 0xffffffff /** - * struct iwl_stats_ntfy_per_sta + * struct iwl_stats_ntfy_per_sta - per STA statistics * * @average_energy: in fact it is minus the energy.. */ @@ -600,7 +601,7 @@ struct iwl_stats_ntfy_per_sta { #define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1) /** - * struct iwl_system_statistics_notif_oper + * struct iwl_system_statistics_notif_oper - statistics notification * * @time_stamp: time when the notification is sent from firmware * @per_link: per link statistics, &struct iwl_stats_ntfy_per_link @@ -615,7 +616,7 @@ struct iwl_system_statistics_notif_oper { } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ /** - * struct iwl_system_statistics_part1_notif_oper + * struct iwl_system_statistics_part1_notif_oper - part1 stats notification * * @time_stamp: time when the notification is sent from firmware * @per_link: per link statistics &struct iwl_stats_ntfy_part1_per_link @@ -628,7 +629,7 @@ struct iwl_system_statistics_part1_notif_oper { } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */ /** - * struct iwl_system_statistics_end_notif + * struct iwl_system_statistics_end_notif - statistics end notification * * @time_stamp: time when the notification is sent from firmware */ @@ -637,7 +638,7 @@ struct iwl_system_statistics_end_notif { } __packed; /* STATISTICS_FW_NTFY_END_API_S_VER_1 */ /** - * struct iwl_statistics_operational_ntfy + * struct iwl_statistics_operational_ntfy - operational stats notification * * @hdr: general statistics header * @flags: bitmap of possible notification structures @@ -662,7 +663,7 @@ struct iwl_statistics_operational_ntfy { } __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */ /** - * struct iwl_statistics_operational_ntfy_ver_14 + * struct iwl_statistics_operational_ntfy_ver_14 - operational stats notification * * @hdr: general statistics header * @flags: bitmap of possible notification structures @@ -707,7 +708,7 @@ struct iwl_statistics_operational_ntfy_ver_14 { } __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */ /** - * struct iwl_statistics_phy_ntfy + * struct iwl_statistics_phy_ntfy - PHY statistics notification * * @hdr: general statistics header * RX PHY related statistics @@ -808,7 +809,7 @@ struct iwl_statistics_phy_ntfy { } __packed; /* STATISTICS_PHY_NTFY_API_S_VER_1 */ /** - * struct iwl_statistics_mac_ntfy + * struct iwl_statistics_mac_ntfy - MAC statistics notification * * @hdr: general statistics header * @bcast_filter_passed_per_mac: bcast filter passed per mac @@ -827,7 
+828,7 @@ struct iwl_statistics_mac_ntfy { } __packed; /* STATISTICS_MAC_NTFY_API_S_VER_1 */ /** - * struct iwl_statistics_rx_ntfy + * struct iwl_statistics_rx_ntfy - RX statistics notification * * @hdr: general statistics header * @rx_agg_mpdu_cnt: aggregation frame count (number of @@ -867,7 +868,7 @@ struct iwl_statistics_rx_ntfy { } __packed; /* STATISTICS_RX_NTFY_API_S_VER_1 */ /** - * struct iwl_statistics_tx_ntfy + * struct iwl_statistics_tx_ntfy - TX statistics notification * * @hdr: general statistics header * @cts_timeout: timeout when waiting for CTS @@ -976,7 +977,7 @@ struct iwl_statistics_tx_ntfy { } __packed; /* STATISTICS_TX_NTFY_API_S_VER_1 */ /** - * struct iwl_statistics_duration_ntfy + * struct iwl_statistics_duration_ntfy - burst/duration statistics * * @hdr: general statistics header * @cont_burst_chk_cnt: number of times continuation or @@ -995,7 +996,7 @@ struct iwl_statistics_duration_ntfy { } __packed; /* STATISTICS_DURATION_NTFY_API_S_VER_1 */ /** - * struct iwl_statistics_he_ntfy + * struct iwl_statistics_he_ntfy - HE statistics * * @hdr: general statistics header * received HE frames diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 26d2013905ed..31d3336726b4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -963,7 +963,7 @@ struct iwl_scd_txq_cfg_cmd { } __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ /** - * struct iwl_scd_txq_cfg_rsp + * struct iwl_scd_txq_cfg_rsp - scheduler TXQ configuration response * @token: taken from the command * @sta_id: station id from the command * @tid: tid from the command diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index c2a73cc85eff..525a82030daa 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -266,7 +266,7 @@ struct iwl_fw_ini_error_dump_data { } __packed; /** - * struct iwl_fw_ini_dump_entry + * struct iwl_fw_ini_dump_entry - dump entry descriptor * @list: list of dump entries * @size: size of the data * @data: entry data @@ -305,7 +305,7 @@ struct iwl_fw_ini_fifo_hdr { * @dram_base_addr: base address of dram monitor range * @page_num: page number of memory range * @fifo_hdr: fifo header of memory range - * @fw_pkt: FW packet header of memory range + * @fw_pkt_hdr: FW packet header of memory range * @data: the actual memory */ struct iwl_fw_ini_error_dump_range { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index b7c1ab7a3006..b9e0b69c6680 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -222,7 +222,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning + * @IWL_UCODE_TLV_API_OCE: support for OCE + * @IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE: new beacon template * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used + * @IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL: WoWLAN key material support * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. 
* @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is @@ -245,6 +248,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S + * @IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE: WoWLAN TCP-SYN wake support * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API * is supported by FW, this indicates the RTT confidence value * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar @@ -253,6 +257,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * SCAN_CONFIG_DB_CMD_API_S. * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell * number of APs in the 5 GHz band + * @IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER: extended channel config in scan * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification * @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx * logic. @@ -352,16 +357,24 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting * stabilization latency for SoCs. * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification + * @IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT: binding CDB support + * @IWL_UCODE_TLV_CAPA_CDB_SUPPORT: CDB support + * @IWL_UCODE_TLV_CAPA_D0I3_END_FIRST: D0I3 end command comes first * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 - * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command + * @IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band * (6 GHz). * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command + * @IWL_UCODE_TLV_CAPA_SET_LTR_GEN2: LTR gen2 support + * @IWL_UCODE_TLV_CAPA_TAS_CFG: TAS configuration support + * @IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD: session protection command + * @IWL_UCODE_TLV_CAPA_SET_PPAG: PPAG support * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT + * @IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT: MQ RX support * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA * countdown offloading. Beacon notifications are not sent to the host. * The fw also offloads TBTT alignment. @@ -383,23 +396,46 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * command size (command version 4) that supports toggling ACK TX * power reduction. * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 + * @IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT: LED command support * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. 
* @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured * to report the CSI information with (certain) RX frames + * @IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP: suspend/resume command + * @IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP: support for DBGC + * buffer allocation command * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA: supports (de)activating UNII-4 * for US/CA/WW from BIOS + * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels + * @IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT: BIGTK support * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC * channels even when these are not enabled. + * @IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN: hidden SSID 6 GHz scan support + * @IWL_UCODE_TLV_CAPA_BROADCAST_TWT: broadcast TWT support + * @IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO: support for BT-coex high + * priority for 802.1X/4-way-HS + * @IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT: multi-link BAID support + * @IWL_UCODE_TLV_CAPA_SYNCED_TIME: synced time command support + * @IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM: time sync support + * @IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT: BIGTK TX support + * @IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT: MLD API support + * @IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT: fixed antenna scan support + * @IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT: PPAG China BIOS support + * @IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT: BTM protocol offload support + * @IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT: STA command MFP support + * @IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT: sniffer validate bits support + * @IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT: China 2022 regulator support * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection * complete to FW. * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload * protected) A-MSDU. + * @IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT: support for DBGC fragmented + * DRAM buffers * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement. * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise * passive channels @@ -407,6 +443,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * for CA from BIOS. * @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA * @IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT: external FSEQ image support + * @IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT: FW reset handshake is needed + * during assert handling even if the dump isn't split * @IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE: Firmware has capability of * handling raw DSM table data. 
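Each capability name in this list maps to a plain bit index: set N of the TLV covers capabilities N * 32 through N * 32 + 31, which is why later entries in this hunk are spelled out as, e.g., (4 * 32 + 0). A rough sketch of the lookup this encoding implies, as a simplified stand-in for the driver's fw_has_capa() (used in iwl_fill_lari_config() further down in this patch); example_has_capa() is not a real iwlwifi function:

/* Illustrative only: capability N is bit (N % 32) of 32-bit word (N / 32).
 * Assumes <linux/bits.h> for BIT().
 */
static bool example_has_capa(const u32 *capa_words, unsigned int capa)
{
	return capa_words[capa / 32] & BIT(capa % 32);
}

/* e.g. RESET_DURING_ASSERT == 4 * 32 + 0 is bit 0 of word 4 */
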
* @@ -487,12 +525,7 @@ enum iwl_ucode_tlv_capa { /* set 3 */ IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA = (__force iwl_ucode_tlv_capa_t)96, - - /* - * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels - */ IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, - IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, @@ -514,11 +547,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT = (__force iwl_ucode_tlv_capa_t)125, /* set 4 */ - /** - * @IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT: FW reset handshake is needed - * during assert handling even if the dump isn't split - */ - IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 0), + + IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 0), IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 1), NUM_IWL_UCODE_TLV_CAPA /* @@ -852,6 +882,8 @@ struct iwl_fw_dbg_trigger_low_rssi { * @start_assoc_denied: number of denied association to start recording * @start_assoc_timeout: number of association timeout to start recording * @start_connection_loss: number of connection loss to start recording + * @reserved: reserved + * @reserved2: reserved */ struct iwl_fw_dbg_trigger_mlme { u8 stop_auth_denied; @@ -885,6 +917,7 @@ struct iwl_fw_dbg_trigger_mlme { * @p2p_device: timeout for the queues of a P2P device in ms * @ibss: timeout for the queues of an IBSS in ms * @tdls: timeout for the queues of a TDLS station in ms + * @reserved: reserved */ struct iwl_fw_dbg_trigger_txq_timer { __le32 command_queue; @@ -900,7 +933,7 @@ struct iwl_fw_dbg_trigger_txq_timer { /** * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger - * time_Events: a list of tuples <id, action_bitmap>. The driver will issue a + * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a * trigger each time a time event notification that relates to time event * id with one of the actions in the bitmap is received and * BIT(notif->status) is set in status_bitmap. @@ -916,19 +949,19 @@ struct iwl_fw_dbg_trigger_time_event { /** * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger - * rx_ba_start: tid bitmap to configure on what tid the trigger should occur + * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is started. - * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is stopped. - * tx_ba_start: tid bitmap to configure on what tid the trigger should occur + * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is started. - * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is stopped. - * rx_bar: tid bitmap to configure on what tid the trigger should occur + * @rx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is received (for a Tx BlockAck session). 
- * tx_bar: tid bitmap to configure on what tid the trigger should occur + * @tx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is send (for an Rx BlocAck session). - * frame_timeout: tid bitmap to configure on what tid the trigger should occur + * @frame_timeout: tid bitmap to configure on what tid the trigger should occur * when a frame times out in the reordering buffer. */ struct iwl_fw_dbg_trigger_ba { @@ -946,6 +979,7 @@ struct iwl_fw_dbg_trigger_ba { * @action_bitmap: the TDLS action to trigger the collection upon * @peer_mode: trigger on specific peer or all * @peer: the TDLS peer to trigger the collection on + * @reserved: reserved */ struct iwl_fw_dbg_trigger_tdls { u8 action_bitmap; @@ -958,6 +992,7 @@ struct iwl_fw_dbg_trigger_tdls { * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response * status. * @statuses: the list of statuses to trigger the collection on + * @reserved: reserved */ struct iwl_fw_dbg_trigger_tx_status { struct tx_status { @@ -971,6 +1006,7 @@ struct iwl_fw_dbg_trigger_tx_status { * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id * @usniffer: should the uSniffer image be used + * @reserved: reserved * @num_of_hcmds: how many HCMDs to send are present here * @hcmd: a variable length host command to be sent to apply the configuration. * If there is more than one HCMD to send, they will appear one after the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 5256f20623e9..045a3e009429 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -14,14 +14,13 @@ #include "error-dump.h" /** - * enum iwl_ucode_type - * - * The type of ucode. 
+ * enum iwl_ucode_type - type of ucode * * @IWL_UCODE_REGULAR: Normal runtime ucode * @IWL_UCODE_INIT: Initial ucode * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image + * @IWL_UCODE_TYPE_MAX: (internal value) */ enum iwl_ucode_type { IWL_UCODE_REGULAR, @@ -122,7 +121,7 @@ struct fw_img { #define FW_ADDR_CACHE_CONTROL 0xC0000000UL /** - * struct iwl_fw_paging + * struct iwl_fw_paging - FW paging descriptor * @fw_paging_phys: page phy pointer * @fw_paging_block: pointer to the allocated block * @fw_paging_size: page size @@ -197,6 +196,11 @@ struct iwl_dump_exclude { * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image * @pnvm_data: PNVM data embedded in the .ucode file, if any * @pnvm_size: size of the embedded PNVM data + * @dbg: debug data, see &struct iwl_fw_dbg + * @default_calib: default calibration data + * @phy_config: PHY configuration flags + * @valid_rx_ant: valid RX antenna bitmap + * @valid_tx_ant: valid TX antenna bitmap */ struct iwl_fw { u32 ucode_ver; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index e1f28b053253..d2ad169ae880 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -543,32 +543,14 @@ static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) switch (cmd_ver) { case 12: - case 11: cmd_size = sizeof(struct iwl_lari_config_change_cmd); break; - case 10: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10); - break; - case 9: case 8: - case 7: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v8); break; case 6: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); break; - case 5: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); - break; - case 4: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); - break; - case 3: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); - break; - case 2: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); - break; default: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); break; @@ -609,11 +591,11 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, if (!has_raw_dsm_capa) value &= DSM_UNII4_ALLOW_BITMAP; - /* Since version 9, bits 4 and 5 are supported + /* Since version 12, bits 4 and 5 are supported * regardless of this capability, By pass this masking * if firmware has capability of accepting raw DSM table. 
*/ - if (!has_raw_dsm_capa && cmd_ver < 9 && + if (!has_raw_dsm_capa && cmd_ver < 12 && !fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA)) value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | @@ -637,7 +619,7 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, if (!has_raw_dsm_capa && cmd_ver < 12 && !fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA)) - value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11; + value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V8; cmd->chan_state_active_bitmap = cpu_to_le32(value); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 806f9bcdf4f5..57570ff15622 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -45,6 +45,8 @@ struct iwl_fwrt_shared_mem_cfg { * struct iwl_fwrt_dump_data - dump data * @trig: trigger the worker was scheduled upon * @fw_pkt: packet received from FW + * @desc: dump descriptor + * @monitor_only: only dump for monitor * * Note that the decision which part of the union is used * is based on iwl_trans_dbg_ini_valid(): the 'trig' part @@ -68,6 +70,7 @@ struct iwl_fwrt_dump_data { * struct iwl_fwrt_wk_data - dump worker data struct * @idx: index of the worker * @wk: worker + * @dump_data: dump data */ struct iwl_fwrt_wk_data { u8 idx; @@ -91,8 +94,8 @@ struct iwl_txf_iter_data { /** * struct iwl_fw_runtime - runtime data for firmware + * @trans: transport pointer * @fw: firmware image - * @cfg: NIC configuration * @dev: device pointer * @ops: user ops * @ops_ctx: user ops context @@ -117,6 +120,23 @@ struct iwl_txf_iter_data { * zero (default initialization) means it hasn't been read yet, * and BIT(0) is set when it has since function 0 also has this * bitmap and is always supported + * @geo_enabled: WGDS table is present + * @geo_num_profiles: number of geo profiles + * @geo_rev: geo profiles table revision + * @ppag_chains: PPAG table data + * @ppag_flags: PPAG flags + * @reduced_power_flags: reduced power flags + * @sanitize_ctx: context for dump sanitizer + * @sanitize_ops: dump sanitizer ops + * @sar_chain_a_profile: SAR chain A profile + * @sar_chain_b_profile: SAR chain B profile + * @sgom_enabled: SGOM enabled + * @sgom_table: SGOM table + * @timestamp: timestamp marker data + * @timestamp.wk: timestamp marking worker + * @timestamp.seq: timestamp marking sequence + * @timestamp.delay: timestamp marking worker delay + * @tpc_enabled: TPC enabled */ struct iwl_fw_runtime { struct iwl_trans *trans; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index a607e7ab914b..076810ee5d34 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -170,7 +170,6 @@ struct iwl_fw_mon_regs { * for aggregation * @min_txq_size: minimum number of slots required in a TX queue * @gp2_reg_addr: GP2 (timer) register address - * @min_umac_error_event_table: minimum SMEM location of UMAC error table * @mon_dbgi_regs: monitor DBGI registers * @mon_dram_regs: monitor DRAM registers * @mon_smem_regs: monitor SMEM registers @@ -203,7 +202,6 @@ struct iwl_family_base_params { netdev_features_t features; u32 smem_offset; u32 smem_len; - u32 min_umac_error_event_table; u32 d3_debug_data_base_addr; u32 d3_debug_data_length; u32 min_txq_size; @@ -385,7 +383,7 @@ struct iwl_mac_cfg { #define IWL_NUM_RBDS_EHT (512 * 8) /** - * struct iwl_rf_cfg + * struct iwl_rf_cfg - RF/CRF 
configuration data * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as <fw_name_pre>-<api>.ucode. @@ -418,6 +416,7 @@ struct iwl_mac_cfg { * @vht_mu_mimo_supported: VHT MU-MIMO support * @nvm_type: see &enum iwl_nvm_type * @uhb_supported: ultra high band channels supported + * @eht_supported: EHT supported * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) * @@ -450,7 +449,8 @@ struct iwl_rf_cfg { host_interrupt_operation_mode:1, lp_xtal_workaround:1, vht_mu_mimo_supported:1, - uhb_supported:1; + uhb_supported:1, + eht_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -686,8 +686,10 @@ extern const char iwl_be211_name[]; extern const char iwl_killer_bn1850w2_name[]; extern const char iwl_killer_bn1850i_name[]; extern const char iwl_bn201_name[]; +extern const char iwl_bn203_name[]; extern const char iwl_be221_name[]; extern const char iwl_be223_name[]; +extern const char iwl_ax221_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_rf_cfg iwl5300_agn_cfg; extern const struct iwl_rf_cfg iwl5350_agn_cfg; @@ -743,6 +745,7 @@ extern const struct iwl_rf_cfg iwl_rf_fm; extern const struct iwl_rf_cfg iwl_rf_fm_160mhz; #define iwl_rf_wh iwl_rf_fm #define iwl_rf_wh_160mhz iwl_rf_fm_160mhz +extern const struct iwl_rf_cfg iwl_rf_wh_non_eht; #define iwl_rf_pe iwl_rf_fm #endif /* CONFIG_IWLMLD */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 7ed6329fd8ca..fe4e46a0edbd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2023, 2025 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ @@ -32,7 +32,7 @@ union iwl_dbg_tlv_tp_data { }; /** - * struct iwl_dbg_tlv_time_point_data + * struct iwl_dbg_tlv_time_point_data - debug time point data * @trig_list: list of triggers * @active_trig_list: list of active triggers * @hcmd_list: list of host commands diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 607fcea6f4ef..3391f07b01de 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -177,9 +177,10 @@ static inline char iwl_drv_get_step(int step) return 'a' + step; } -static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans) +bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans) { - return CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM; + return trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && + CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM; } const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) @@ -347,8 +348,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) if (first) drv->fw_index = ucode_api_max; - else if (drv->fw_index == ENCODE_CORE_AS_API(99)) - drv->fw_index = 101; /* last API-scheme number below core 99 */ + else if (drv->fw_index == ENCODE_CORE_AS_API(100)) + drv->fw_index = 102; /* last API-scheme number below core 100 */ else drv->fw_index--; @@ -427,7 +428,6 @@ struct iwl_firmware_pieces { size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; - u32 major; }; 
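The fw_index handling above implements a fallback walk: the driver first requests the newest firmware file (ucode_api_max) and steps down one API version per failed request, with the ENCODE_CORE_AS_API(100) -> 102 special case jumping over the gap between the core-numbering and API-numbering schemes. A simplified sketch of that walk-down, using the <fw_name_pre>-<api>.ucode filename form documented above; example_try_load() is a hypothetical loader, not an iwlwifi function:

/* Hypothetical, simplified version of the versioned-filename fallback. */
static int example_try_load(const char *name); /* hypothetical: 0 on success */

static int example_request_fw(const char *fw_name_pre, int api_max, int api_min)
{
	char name[64];
	int idx;

	for (idx = api_max; idx >= api_min; idx--) {
		snprintf(name, sizeof(name), "%s-%d.ucode", fw_name_pre, idx);
		if (example_try_load(name) == 0)
			return idx;	/* this API version exists on disk */
	}
	return -ENOENT;	/* no supported firmware file found */
}
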
static void alloc_sec_data(struct iwl_firmware_pieces *pieces, @@ -1069,19 +1069,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; case IWL_UCODE_TLV_FW_VERSION: { const __le32 *ptr = (const void *)tlv_data; - u32 minor; + u32 major, minor; u8 local_comp; if (tlv_len != sizeof(u32) * 3) goto invalid_tlv_len; - pieces->major = le32_to_cpup(ptr++); + major = le32_to_cpup(ptr++); minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), - "%u.%08x.%u %s", pieces->major, minor, + "%u.%08x.%u %s", major, minor, local_comp, iwl_reduced_fw_name(drv)); break; } @@ -1589,8 +1589,6 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv) } } -#define IWL_MLD_SUPPORTED_FW_VERSION 97 - /* * iwl_req_fw_callback - callback when firmware was loaded * @@ -1859,17 +1857,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } #if IS_ENABLED(CONFIG_IWLMLD) - if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION && - iwl_drv_is_wifi7_supported(drv->trans)) + if (iwl_drv_is_wifi7_supported(drv->trans)) op = &iwlwifi_opmode_table[MLD_OP_MODE]; -#else - if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION && - iwl_drv_is_wifi7_supported(drv->trans)) { - IWL_ERR(drv, - "IWLMLD needs to be compiled to support this firmware\n"); - mutex_unlock(&iwlwifi_opmode_table_mtx); - goto out_unbind; - } #endif IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 595300a14639..6e60953de2ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -62,7 +62,8 @@ struct iwl_rf_cfg; * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. For example, the bus specific probe * function should do bus related operations only, and then call to this - * function. It returns the driver object or %NULL if an error occurred. + * function. + * Return: the driver object or %NULL if an error occurred. */ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans); @@ -78,6 +79,12 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans); void iwl_drv_stop(struct iwl_drv *drv); /* + * iwl_drv_is_wifi7_supported - returns if wifi7 is supported + * If yes, iwlmld needs to be used to drive the device. 
+ */ +bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans); + +/* * exported symbol management * * The driver can be split into multiple modules, in which case some symbols diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 21eabfc3ffc8..0476df7b7f17 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022, 2024-2025 Intel Corporation */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ @@ -42,7 +42,7 @@ enum iwl_uapsd_disable { }; /** - * struct iwl_mod_params + * struct iwl_mod_params - module parameters for iwlwifi * * Holds the module parameters * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 23465e4c4b39..e021fc57d85d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -2080,7 +2080,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); - if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM) + if (trans->cfg->eht_supported) nvm->sku_cap_11be_enable = true; /* Initialize PHY sku data */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index cbc92abf9f87..12f28bb0e859 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -115,11 +115,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a - * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, - * to be fed into the regulatory core. In case the geo_info is set handle - * accordingly. An ERR_PTR is returned on error. - * If not given to the regulatory core, the user is responsible for freeing - * the regdomain returned here with kfree. + * MCC_UPDATE_CMD command. + * + * Return: a newly allocated regulatory domain, to be given to the regulatory + * core. In case the geo_info is set handle accordingly. An ERR_PTR is + * returned on error. If not given to the regulatory core, the user is + * responsible for freeing the regdomain returned here with kfree(). * * @trans: the transport * @num_of_ch: the number of channels @@ -140,6 +141,8 @@ iwl_parse_nvm_mcc_info(struct iwl_trans *trans, * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, * and saved for later use by the driver. Not all NVM sections are saved * this way, only the needed ones.
+ * @length: length of the section + * @data: section data */ struct iwl_nvm_section { u16 length; @@ -148,6 +151,10 @@ struct iwl_nvm_section { /** * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections + * @trans: the transport + * @nvm_file_name: the filename to request + * @nvm_sections: sections data to fill + * Return: 0 on success or an error code */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index a146d0e399f2..df6341dfc4a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -185,6 +185,7 @@ void iwl_opmode_deregister(const char *name); /** * struct iwl_op_mode - operational mode * @ops: pointer to its own ops + * @op_mode_specific: per-opmode data * * This holds an implementation of the mac80211 / fw API. */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index a0cc5d7745e8..a552669db6e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -121,7 +121,7 @@ enum CMD_MODE { #define DEF_CMD_PAYLOAD_SIZE 320 /** - * struct iwl_device_cmd + * struct iwl_device_cmd - device command structure * * For allocation of the command and tx queues, this establishes the overall * size of the largest command we send to uCode, except for commands that @@ -516,7 +516,7 @@ enum iwl_trans_state { */ /** - * enum iwl_ini_cfg_state + * enum iwl_ini_cfg_state - debug config state * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs @@ -532,7 +532,7 @@ enum iwl_ini_cfg_state { #define IWL_TRANS_NMI_TIMEOUT (HZ / 4) /** - * struct iwl_dram_data + * struct iwl_dram_data - DRAM data descriptor * @physical: page phy pointer * @block: pointer to the allocated block/page * @size: size of the block/page diff --git a/drivers/net/wireless/intel/iwlwifi/mld/constants.h b/drivers/net/wireless/intel/iwlwifi/mld/constants.h index 49accf96f44b..5d23a618ae3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/constants.h @@ -75,5 +75,7 @@ #define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT true #define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 #define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 +#define IWL_MLD_STA_EXT_CAPA_SIZE 9 +#define IWL_MLD_EXT_CAPA_NUM_IFTYPES 1 #endif /* __iwl_mld_constants_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c index 1d4282a21f09..dd85be94433c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c @@ -1794,6 +1794,10 @@ iwl_mld_send_proto_offload(struct iwl_mld *mld, u32 enabled = 0; cmd = kzalloc(hcmd.len[0], GFP_KERNEL); + if (!cmd) { + IWL_DEBUG_WOWLAN(mld, "Failed to allocate proto offload cmd\n"); + return -ENOMEM; + } #if IS_ENABLED(CONFIG_IPV6) struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c index b372173c4a79..19da521a4bab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c @@ -124,9 +124,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, 
u16 status; switch (version) { - case 6: case 7: - expected_sz = sizeof(struct iwl_alive_ntf_v6); + expected_sz = sizeof(struct iwl_alive_ntf_v7); break; case 8: expected_sz = sizeof(struct iwl_alive_ntf); @@ -168,11 +167,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & ~FW_ADDR_CACHE_CONTROL; - if (umac_error_table >= trans->mac_cfg->base->min_umac_error_event_table) - iwl_fw_umac_set_alive_err_table(trans, umac_error_table); - else - IWL_ERR(mld, "Not valid error log pointer 0x%08X\n", - umac_error_table); + iwl_fw_umac_set_alive_err_table(trans, umac_error_table); alive_data->valid = status == IWL_ALIVE_STATUS_OK; @@ -188,9 +183,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, le32_to_cpu(umac->umac_major), le32_to_cpu(umac->umac_minor)); - if (version >= 7) - IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n", - le16_to_cpu(palive->flags)); + IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n", + le16_to_cpu(palive->flags)); if (version >= 8) IWL_DEBUG_FW(mld, "platform_id 0x%llx\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c index ed379825a923..a5ececfc13e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c @@ -528,6 +528,19 @@ void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld, mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink; + /* len_low should be 2 + n*13 (where n is the number of descriptors. + * 13 is the size of a NoA descriptor). We can have either one or two + * descriptors. + */ + if (IWL_FW_CHECK(mld, notif->noa_active && + notif->noa_attr.len_low != 2 + + sizeof(struct ieee80211_p2p_noa_desc) && + notif->noa_attr.len_low != 2 + + sizeof(struct ieee80211_p2p_noa_desc) * 2, + "Invalid noa_attr.len_low (%d)\n", + notif->noa_attr.len_low)) + return; + new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); if (!new_data) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c index f6f52d297a72..d89840a1152b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c @@ -465,10 +465,13 @@ int iwl_mld_add_link(struct iwl_mld *mld, int ret; if (!link) { - if (is_deflink) + if (is_deflink) { link = &mld_vif->deflink; - else + } else { link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + } } else { WARN_ON(!mld->fw_status.in_hw_restart); } @@ -572,8 +575,12 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, /* Not in EMLSR and we can't hear the link. * Try to switch to a better link. EMLSR case is handled below. */ - if (!iwl_mld_emlsr_active(vif)) + if (!iwl_mld_emlsr_active(vif)) { + IWL_DEBUG_EHT(mld, + "missed beacons exceeds threshold. link_id=%u. 
Try to switch to a better link.\n", + link_id); iwl_mld_int_mlo_scan(mld, vif); + } } /* no more logic if we're not in EMLSR */ @@ -592,7 +599,8 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, return; IWL_DEBUG_EHT(mld, - "missed bcn on the other link (link_id=%u): %u\n", + "missed bcn link_id=%u: %u consecutive=%u, other link_id=%u: %u\n", + link_id, missed_bcon, missed_bcon_since_rx, other_link->link_id, scnd_lnk_bcn_lost); /* Exit EMLSR if we lost more than diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c index 5725104a53bf..55b484c16280 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -23,6 +23,7 @@ #include "roc.h" #include "mlo.h" #include "stats.h" +#include "iwl-nvm-parse.h" #include "ftm-initiator.h" #include "low_latency.h" #include "fw/api/scan.h" @@ -75,13 +76,12 @@ iwl_mld_iface_combinations[] = { }, }; -static const u8 if_types_ext_capa_sta[] = { - [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, - [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, - [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | - WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, - [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +static const u8 ext_capa_base[IWL_MLD_STA_EXT_CAPA_SIZE] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, }; #define IWL_MLD_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \ @@ -94,18 +94,6 @@ static const u8 if_types_ext_capa_sta[] = { IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT) -static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = { - { - .iftype = NL80211_IFTYPE_STATION, - .extended_capabilities = if_types_ext_capa_sta, - .extended_capabilities_mask = if_types_ext_capa_sta, - .extended_capabilities_len = sizeof(if_types_ext_capa_sta), - /* relevant only if EHT is supported */ - .eml_capabilities = IWL_MLD_EMLSR_CAPA, - .mld_capa_and_ops = IWL_MLD_CAPA_OPS, - }, -}; - static void iwl_mld_hw_set_addresses(struct iwl_mld *mld) { struct wiphy *wiphy = mld->wiphy; @@ -335,21 +323,37 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld) if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); - wiphy->iftype_ext_capab = NULL; - wiphy->num_iftype_ext_capab = 0; - - if (!iwlwifi_mod_params.disable_11ax) { - wiphy->iftype_ext_capab = iftypes_ext_capa; - wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa); - - ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); - ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); - } - if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + /* We are done for non-HE */ + if (iwlwifi_mod_params.disable_11ax) + return; + + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); + ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); + + wiphy->iftype_ext_capab = mld->ext_capab; + wiphy->num_iftype_ext_capab = ARRAY_SIZE(mld->ext_capab); + + BUILD_BUG_ON(sizeof(mld->sta_ext_capab) < sizeof(ext_capa_base)); + + memcpy(mld->sta_ext_capab, ext_capa_base, sizeof(ext_capa_base)); + + mld->ext_capab[0].iftype = NL80211_IFTYPE_STATION; + mld->ext_capab[0].extended_capabilities = mld->sta_ext_capab; + 
mld->ext_capab[0].extended_capabilities_mask = mld->sta_ext_capab; + mld->ext_capab[0].extended_capabilities_len = sizeof(mld->sta_ext_capab); + + if (!mld->nvm_data->sku_cap_11be_enable || + iwlwifi_mod_params.disable_11be) + return; + + mld->ext_capab[0].eml_capabilities = IWL_MLD_EMLSR_CAPA; + mld->ext_capab[0].mld_capa_and_ops = IWL_MLD_CAPA_OPS; + } static void iwl_mac_hw_set_misc(struct iwl_mld *mld) @@ -393,11 +397,9 @@ static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld) TLC_MNG_UPDATE_NOTIF, 0) >= 4) + (iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP, REPLY_RX_MPDU_CMD, 0) >= 6) + - (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP, - RX_NO_DATA_NOTIF, 0) >= 4) + (iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP, TX_CMD, 0) >= 9); - if (ratecheck != 0 && ratecheck != 5) { + if (ratecheck != 0 && ratecheck != 4) { IWL_ERR(mld, "Firmware has inconsistent rates\n"); return -EINVAL; } @@ -680,6 +682,8 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw, #endif iwl_mld_rm_vif(mld, vif); + + mld->monitor.phy.valid = false; } struct iwl_mld_mc_iter_data { @@ -2591,11 +2595,44 @@ iwl_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return NEG_TTLM_RES_ACCEPT; } +static int iwl_mld_get_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 *tx_ant, u32 *rx_ant) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + *tx_ant = iwl_mld_get_valid_tx_ant(mld); + *rx_ant = iwl_mld_get_valid_rx_ant(mld); + + return 0; +} + +static int iwl_mld_set_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 tx_ant, u32 rx_ant) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + if (WARN_ON(!mld->nvm_data)) + return -EBUSY; + + /* mac80211 ensures the device is not started, + * so the firmware cannot be running + */ + + mld->set_tx_ant = tx_ant; + mld->set_rx_ant = rx_ant; + + iwl_reinit_cab(mld->trans, mld->nvm_data, tx_ant, rx_ant, mld->fw); + + return 0; +} + const struct ieee80211_ops iwl_mld_hw_ops = { .tx = iwl_mld_mac80211_tx, .start = iwl_mld_mac80211_start, .stop = iwl_mld_mac80211_stop, .config = iwl_mld_mac80211_config, + .get_antenna = iwl_mld_get_antenna, + .set_antenna = iwl_mld_set_antenna, .add_interface = iwl_mld_mac80211_add_interface, .remove_interface = iwl_mld_mac80211_remove_interface, .conf_tx = iwl_mld_mac80211_conf_tx, diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c index a6962256bdd1..8a4c96385640 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -259,6 +259,7 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = { HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(TLC_MNG_UPDATE_NOTIF), HCMD_NAME(BEACON_FILTER_IN_NOTIF), + HCMD_NAME(PHY_AIR_SNIFFER_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h index 94dc9da6360d..22efe8e10f53 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h @@ -118,7 +118,11 @@ * @monitor.cur_bssid: current bssid tracked by the sniffer * @monitor.ptp_time: set the Rx mactime using the device's PTP clock time * @monitor.p80: primary channel position relative to he whole bandwidth, in - * steps of 80 MHz + * steps of 80 MHz + * @monitor.phy: PHY data information + * @monitor.phy.data: PHY data (&struct iwl_rx_phy_air_sniffer_ntfy) received + * @monitor.phy.valid: PHY data is valid (was received) + * @monitor.phy.used: PHY data was used by an RX * 
@fw_id_to_link_sta: maps a fw id of a sta to the corresponding * ieee80211_link_sta. This is not cleaned up on restart since we want to * preserve the fw sta ids during a restart (for SN/PN restoring). @@ -134,6 +138,8 @@ * @fw: a pointer to the fw object * @hw: pointer to the hw object. * @wiphy: a pointer to the wiphy struct, for easier access to it. + * @ext_capab: extended capabilities that will be set to wiphy on registration. + * @sta_ext_capab: extended capabilities for the station interface. * @nvm_data: pointer to the nvm_data that includes all our capabilities * @fwrt: fw runtime data * @debugfs_dir: debugfs directory @@ -180,6 +186,8 @@ * @mcast_filter_cmd: pointer to the multicast filter command. * @mgmt_tx_ant: stores the last TX antenna index; used for setting * TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure). + * @set_tx_ant: stores the last TX antenna bitmask set by user space (if any) + * @set_rx_ant: stores the last RX antenna bitmask set by user space (if any) * @fw_rates_ver_3: FW rates are in version 3 * @low_latency: low-latency manager. * @tzone: thermal zone device's data @@ -205,6 +213,10 @@ struct iwl_mld { u32 ampdu_ref; bool ampdu_toggle; u8 p80; + struct { + struct iwl_rx_phy_air_sniffer_ntfy data; + u8 valid:1, used:1; + } phy; #ifdef CONFIG_IWLWIFI_DEBUGFS __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; @@ -225,6 +237,8 @@ struct iwl_mld { const struct iwl_fw *fw; struct ieee80211_hw *hw; struct wiphy *wiphy; + struct wiphy_iftype_ext_capab ext_capab[IWL_MLD_EXT_CAPA_NUM_IFTYPES]; + u8 sta_ext_capab[IWL_MLD_STA_EXT_CAPA_SIZE]; struct iwl_nvm_data *nvm_data; struct iwl_fw_runtime fwrt; struct dentry *debugfs_dir; @@ -279,6 +293,9 @@ struct iwl_mld { u8 mgmt_tx_ant; + u8 set_tx_ant; + u8 set_rx_ant; + bool fw_rates_ver_3; struct iwl_mld_low_latency low_latency; @@ -374,6 +391,9 @@ static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld) if (mld->nvm_data && mld->nvm_data->valid_tx_ant) tx_ant &= mld->nvm_data->valid_tx_ant; + if (mld->set_tx_ant) + tx_ant &= mld->set_tx_ant; + return tx_ant; } @@ -384,6 +404,9 @@ static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld) if (mld->nvm_data && mld->nvm_data->valid_rx_ant) rx_ant &= mld->nvm_data->valid_rx_ant; + if (mld->set_rx_ant) + rx_ant &= mld->set_rx_ant; + return rx_ant; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c index 241a6271d13d..c6b151f26921 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c @@ -31,11 +31,9 @@ static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask) { #define NAME_FMT(x) "%s" #define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "", - IWL_DEBUG_INFO(mld, - "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT) - " (0x%x)\n", - HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) - mask); + IWL_DEBUG_EHT(mld, + "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT) + " (0x%x)\n", HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) mask); #undef NAME_FMT #undef NAME_PR } @@ -72,11 +70,9 @@ static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask) { #define NAME_FMT(x) "%s" #define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? 
"[" #x "]" : "", - IWL_DEBUG_INFO(mld, - "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT) - " (0x%x)\n", - HANDLE_EMLSR_EXIT_REASONS(NAME_PR) - mask); + IWL_DEBUG_EHT(mld, + "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT) + " (0x%x)\n", HANDLE_EMLSR_EXIT_REASONS(NAME_PR) mask); #undef NAME_FMT #undef NAME_PR } @@ -170,10 +166,10 @@ static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld, WARN_ON(mld_vif->emlsr.exit_repeat_count > 3); } - IWL_DEBUG_INFO(mld, - "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", - delay / HZ, mld_vif->emlsr.exit_repeat_count, - iwl_mld_get_emlsr_exit_string(reason), reason); + IWL_DEBUG_EHT(mld, + "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", + delay / HZ, mld_vif->emlsr.exit_repeat_count, + iwl_mld_get_emlsr_exit_string(reason), reason); wiphy_delayed_work_queue(mld->wiphy, &mld_vif->emlsr.prevent_done_wk, delay); @@ -217,10 +213,10 @@ static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, link_to_keep = __ffs(vif->active_links); new_active_links = BIT(link_to_keep); - IWL_DEBUG_INFO(mld, - "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", - iwl_mld_get_emlsr_exit_string(exit), exit, - vif->active_links, new_active_links); + IWL_DEBUG_EHT(mld, + "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", + iwl_mld_get_emlsr_exit_string(exit), exit, + vif->active_links, new_active_links); if (sync) ret = ieee80211_set_active_links(vif, new_active_links); @@ -262,9 +258,8 @@ static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif, mld_vif->emlsr.blocked_reasons |= reason; - IWL_DEBUG_INFO(mld, - "Blocking EMLSR mode. reason = %s (0x%x)\n", - iwl_mld_get_emlsr_blocked_string(reason), reason); + IWL_DEBUG_EHT(mld, "Blocking EMLSR mode. reason = %s (0x%x)\n", + iwl_mld_get_emlsr_blocked_string(reason), reason); iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) @@ -335,9 +330,8 @@ void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, mld_vif->emlsr.blocked_reasons &= ~reason; - IWL_DEBUG_INFO(mld, - "Unblocking EMLSR mode. reason = %s (0x%x)\n", - iwl_mld_get_emlsr_blocked_string(reason), reason); + IWL_DEBUG_EHT(mld, "Unblocking EMLSR mode. 
reason = %s (0x%x)\n", + iwl_mld_get_emlsr_blocked_string(reason), reason); iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) @@ -348,7 +342,7 @@ void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, if (mld_vif->emlsr.blocked_reasons) return; - IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n"); + IWL_DEBUG_EHT(mld, "EMLSR is unblocked\n"); iwl_mld_int_mlo_scan(mld, vif); } @@ -365,18 +359,17 @@ iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac, switch (action) { case ESR_RECOMMEND_LEAVE: - IWL_DEBUG_INFO(mld_vif->mld, - "FW recommend leave reason = 0x%x\n", - le32_to_cpu(notif->leave_reason_mask)); + IWL_DEBUG_EHT(mld_vif->mld, + "FW recommend leave reason = 0x%x\n", + le32_to_cpu(notif->leave_reason_mask)); iwl_mld_exit_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_EXIT_FW_REQUEST, iwl_mld_get_primary_link(vif)); break; case ESR_FORCE_LEAVE: - IWL_DEBUG_INFO(mld_vif->mld, - "FW force leave reason = 0x%x\n", - le32_to_cpu(notif->leave_reason_mask)); + IWL_DEBUG_EHT(mld_vif->mld, "FW force leave reason = 0x%x\n", + le32_to_cpu(notif->leave_reason_mask)); fallthrough; case ESR_RECOMMEND_ENTER: default: @@ -412,11 +405,12 @@ void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld, struct ieee80211_bss_conf *bss_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id); - IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n", - le32_to_cpu(notif->activation) ? "enter" : "exit", - bss_conf ? bss_conf->link_id : -1, - le32_to_cpu(notif->link_id), - le32_to_cpu(notif->err_code)); + IWL_DEBUG_EHT(mld, + "Failed to %s EMLSR on link %d (FW: %d), reason %d\n", + le32_to_cpu(notif->activation) ? "enter" : "exit", + bss_conf ? bss_conf->link_id : -1, + le32_to_cpu(notif->link_id), + le32_to_cpu(notif->err_code)); if (IWL_FW_CHECK(mld, !bss_conf, "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n", @@ -590,8 +584,8 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) spin_unlock_bh(&queue_counter->lock); } - IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", - total_tx, total_rx); + IWL_DEBUG_EHT(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", + total_tx, total_rx); /* If we don't have enough MPDUs - exit EMLSR */ if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH && @@ -603,10 +597,10 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) /* EMLSR is not active */ if (sec_link_id == -1) - return; + goto schedule; - IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n", - sec_link_id, sec_link_tx, sec_link_rx); + IWL_DEBUG_EHT(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n", + sec_link_id, sec_link_tx, sec_link_rx); /* Calculate the percentage of the secondary link TX/RX */ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0; @@ -625,6 +619,7 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) return; } +schedule: /* Check again when the next window ends */ wiphy_delayed_work_queue(mld_vif->mld->wiphy, &mld_vif->emlsr.check_tpt_wk, @@ -702,10 +697,8 @@ iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld, ret |= IWL_MLD_EMLSR_EXIT_CSA; if (ret) { - IWL_DEBUG_INFO(mld, - "Link %d is not allowed for EMLSR as %s\n", - link->link_id, - primary ? "primary" : "secondary"); + IWL_DEBUG_EHT(mld, "Link %d is not allowed for EMLSR as %s\n", + link->link_id, primary ? 
"primary" : "secondary"); iwl_mld_print_emlsr_exit(mld, ret); } @@ -869,13 +862,12 @@ iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif, reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD; if (reason_mask) { - IWL_DEBUG_INFO(mld, - "Links %d and %d are not a valid pair for EMLSR\n", - a->link_id, b->link_id); - IWL_DEBUG_INFO(mld, - "Links bandwidth are: %d and %d\n", - nl80211_chan_width_to_mhz(a->chandef->width), - nl80211_chan_width_to_mhz(b->chandef->width)); + IWL_DEBUG_EHT(mld, + "Links %d and %d are not a valid pair for EMLSR\n", + a->link_id, b->link_id); + IWL_DEBUG_EHT(mld, "Links bandwidth are: %d and %d\n", + nl80211_chan_width_to_mhz(a->chandef->width), + nl80211_chan_width_to_mhz(b->chandef->width)); iwl_mld_print_emlsr_exit(mld, reason_mask); } @@ -993,8 +985,8 @@ static void _iwl_mld_select_links(struct iwl_mld *mld, } set_active: - IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n", - new_active, new_primary); + IWL_DEBUG_EHT(mld, "Link selection result: 0x%x. Primary = %d\n", + new_active, new_primary); mld_vif->emlsr.selected_primary = new_primary; mld_vif->emlsr.selected_links = new_active; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c index 884973d0b344..4cf3920b005f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c @@ -589,8 +589,8 @@ void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0); - else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) - iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0); + else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, PHY_AIR_SNIFFER_NOTIF)) + iwl_mld_handle_phy_air_sniffer_notif(mld, napi, pkt); else iwl_mld_rx_notif(mld, rxb, pkt); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c index 4136c98030d0..4e37a288471e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c @@ -231,7 +231,9 @@ void iwl_mld_handle_roc_notif(struct iwl_mld *mld, struct ieee80211_vif *vif; vif = iwl_mld_find_roc_vif(mld, activity); - if (WARN_ON(!vif)) + if (IWL_FW_CHECK(mld, !vif, + "unexpected ROC notif from FW for activity %d\n", + activity)) return; mld_vif = iwl_mld_vif_from_mac80211(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c index 20d866dd92c2..6a76e3fcb581 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c @@ -18,41 +18,32 @@ /* stores relevant PHY data fields extracted from iwl_rx_mpdu_desc */ struct iwl_mld_rx_phy_data { - enum iwl_rx_phy_info_type info_type; - __le32 data0; - __le32 data1; - __le32 data2; - __le32 data3; - __le32 eht_data4; - __le32 data5; - __le16 data4; + struct iwl_rx_phy_air_sniffer_ntfy *ntfy; bool first_subframe; bool with_data; - __le32 rx_vec[4]; u32 rate_n_flags; u32 gp2_on_air_rise; + /* phy_info is only valid when we have a frame, i.e. 
with_data=true */ u16 phy_info; u8 energy_a, energy_b; }; static void -iwl_mld_fill_phy_data(struct iwl_mld *mld, - struct iwl_rx_mpdu_desc *desc, - struct iwl_mld_rx_phy_data *phy_data) +iwl_mld_fill_phy_data_from_mpdu(struct iwl_mld *mld, + struct iwl_rx_mpdu_desc *desc, + struct iwl_mld_rx_phy_data *phy_data) { + if (unlikely(mld->monitor.phy.valid)) { + mld->monitor.phy.used = true; + phy_data->ntfy = &mld->monitor.phy.data; + } + phy_data->phy_info = le16_to_cpu(desc->phy_info); phy_data->rate_n_flags = iwl_v3_rate_from_v2_v3(desc->v3.rate_n_flags, mld->fw_rates_ver_3); phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); phy_data->energy_a = desc->v3.energy_a; phy_data->energy_b = desc->v3.energy_b; - phy_data->data0 = desc->v3.phy_data0; - phy_data->data1 = desc->v3.phy_data1; - phy_data->data2 = desc->v3.phy_data2; - phy_data->data3 = desc->v3.phy_data3; - phy_data->data4 = desc->phy_data4; - phy_data->eht_data4 = desc->phy_eht_data4; - phy_data->data5 = desc->v3.phy_data5; phy_data->with_data = true; } @@ -217,26 +208,19 @@ static void iwl_mld_fill_signal(struct iwl_mld *mld, int link_id, } static void -iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data, - struct ieee80211_radiotap_he *he, - struct ieee80211_radiotap_he_mu *he_mu, - struct ieee80211_rx_status *rx_status) +iwl_mld_he_set_ru_alloc(struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_he *he, + u8 ru_with_p80) { - /* Unfortunately, we have to leave the mac80211 data - * incorrect for the case that we receive an HE-MU - * transmission and *don't* have the HE phy data (due - * to the bits being used for TSF). This shouldn't - * happen though as management frames where we need - * the TSF/timers are not be transmitted in HE-MU. - */ - u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); - u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u8 ru = ru_with_p80 >> 1; + u8 p80 = ru_with_p80 & 1; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); switch (ru) { case 0 ... 
36: @@ -266,227 +250,262 @@ iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data, rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; } + he->data2 |= le16_encode_bits(offs, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); - he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); - if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80)) - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); -#define CHECK_BW(bw) \ - BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ - RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \ - BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \ - RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) - CHECK_BW(20); - CHECK_BW(40); - CHECK_BW(80); - CHECK_BW(160); - - if (he_mu) - he_mu->flags2 |= - le16_encode_bits(u32_get_bits(rate_n_flags, - RATE_MCS_CHAN_WIDTH_MSK), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG) - he->data6 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(u32_get_bits(rate_n_flags, - RATE_MCS_CHAN_WIDTH_MSK), - IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); + he->data2 |= le16_encode_bits(p80, IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); } +#define RTAP_ENC_HE(src, src_msk, dst_msk) \ + le16_encode_bits(le32_get_bits(src, src_msk), dst_msk) + static void -iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data, - struct ieee80211_radiotap_he_mu *he_mu) +iwl_mld_decode_he_mu(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status) { - u32 phy_data2 = le32_to_cpu(phy_data->data2); - u32 phy_data3 = le32_to_cpu(phy_data->data3); - u16 phy_data4 = le16_to_cpu(phy_data->data4); u32 rate_n_flags = phy_data->rate_n_flags; - if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) { + he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b, + OFDM_RX_FRAME_HE_SIGB_DCM, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b, + OFDM_RX_FRAME_HE_SIGB_MCS, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_PRMBL_PUNC_TYPE, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); + he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_MU_NUM_OF_SIGB_SYM_OR_USER_NUM, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags2 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.b, + OFDM_RX_FRAME_HE_MU_SIGB_COMP, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + + if (phy_data->ntfy->flags & IWL_SNIF_FLAG_VALID_RU && + le32_get_bits(phy_data->ntfy->sigs.he.cmn[2], + OFDM_RX_FRAME_HE_COMMON_CC1_CRC_OK)) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN); he_mu->flags1 |= - le16_encode_bits(u32_get_bits(phy_data4, - IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); - - he_mu->ru_ch1[0] = u32_get_bits(phy_data2, - IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0); - he_mu->ru_ch1[1] = u32_get_bits(phy_data3, - IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1); - he_mu->ru_ch1[2] = u32_get_bits(phy_data2, - IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2); - he_mu->ru_ch1[3] = u32_get_bits(phy_data3, - IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3); + RTAP_ENC_HE(phy_data->ntfy->sigs.he.cmn[2], + 
OFDM_RX_FRAME_HE_CENTER_RU_CC1, + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); + + he_mu->ru_ch1[0] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[0], + OFDM_RX_FRAME_HE_RU_ALLOC_0_A1); + he_mu->ru_ch1[1] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[1], + OFDM_RX_FRAME_HE_RU_ALLOC_1_C1); + he_mu->ru_ch1[2] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[0], + OFDM_RX_FRAME_HE_RU_ALLOC_0_A2); + he_mu->ru_ch1[3] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[1], + OFDM_RX_FRAME_HE_RU_ALLOC_1_C2); } - if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) && + if (phy_data->ntfy->flags & IWL_SNIF_FLAG_VALID_RU && + le32_get_bits(phy_data->ntfy->sigs.he.cmn[2], + OFDM_RX_FRAME_HE_COMMON_CC2_CRC_OK) && (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); he_mu->flags2 |= - le16_encode_bits(u32_get_bits(phy_data4, - IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); - - he_mu->ru_ch2[0] = u32_get_bits(phy_data2, - IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0); - he_mu->ru_ch2[1] = u32_get_bits(phy_data3, - IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1); - he_mu->ru_ch2[2] = u32_get_bits(phy_data2, - IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2); - he_mu->ru_ch2[3] = u32_get_bits(phy_data3, - IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3); + RTAP_ENC_HE(phy_data->ntfy->sigs.he.cmn[2], + OFDM_RX_FRAME_HE_CENTER_RU_CC2, + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); + + he_mu->ru_ch2[0] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[0], + OFDM_RX_FRAME_HE_RU_ALLOC_0_B1); + he_mu->ru_ch2[1] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[1], + OFDM_RX_FRAME_HE_RU_ALLOC_1_D1); + he_mu->ru_ch2[2] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[0], + OFDM_RX_FRAME_HE_RU_ALLOC_0_B2); + he_mu->ru_ch2[3] = le32_get_bits(phy_data->ntfy->sigs.he.cmn[1], + OFDM_RX_FRAME_HE_RU_ALLOC_1_D2); } + +#define CHECK_BW(bw) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); +#undef CHECK_BW + + he_mu->flags2 |= + le16_encode_bits(u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); + + iwl_mld_he_set_ru_alloc(rx_status, he, + le32_get_bits(phy_data->ntfy->sigs.he.b, + OFDM_RX_FRAME_HE_SIGB_STA_RU)); +} + +static void +iwl_mld_decode_he_tb_phy_data(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_he *he, + struct ieee80211_rx_status *rx_status) +{ + u32 rate_n_flags = phy_data->rate_n_flags; + u32 nsts; + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); + + he->data4 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a1, + OFDM_RX_HE_TRIG_SPATIAL_REUSE_1, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); + he->data4 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a1, + OFDM_RX_HE_TRIG_SPATIAL_REUSE_2, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); + he->data4 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a1, + OFDM_RX_HE_TRIG_SPATIAL_REUSE_3, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); + he->data4 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a1, + OFDM_RX_HE_TRIG_SPATIAL_REUSE_4, + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); + he->data3 |= 
RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a1, + OFDM_RX_HE_TRIG_BSS_COLOR, + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + +#define CHECK_BW(bw) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); +#undef CHECK_BW + + he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | + le16_encode_bits(u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK), + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); + + if (!(phy_data->ntfy->flags & IWL_SNIF_FLAG_VALID_TB_RX)) + return; + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + + he->data3 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_CODING_EXTRA_SYM, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data6 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_DOPPLER, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_PE_DISAMBIG, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_NUM_OF_LTF_SYM, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + he->data6 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he_tb.a2, + OFDM_RX_HE_TRIG_TXOP_DURATION, + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + + iwl_mld_he_set_ru_alloc(rx_status, he, + le32_get_bits(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_RU)); + + nsts = le32_get_bits(phy_data->ntfy->sigs.he_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_NSTS) + 1; + rx_status->nss = nsts >> !!(rate_n_flags & RATE_MCS_STBC_MSK); } static void iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, - struct ieee80211_rx_status *rx_status, - int queue) + struct ieee80211_rx_status *rx_status) { - switch (phy_data->info_type) { - case IWL_RX_PHY_INFO_TYPE_NONE: - case IWL_RX_PHY_INFO_TYPE_CCK: - case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY: - case IWL_RX_PHY_INFO_TYPE_HT: - case IWL_RX_PHY_INFO_TYPE_VHT_SU: - case IWL_RX_PHY_INFO_TYPE_VHT_MU: - case IWL_RX_PHY_INFO_TYPE_EHT_MU: - case IWL_RX_PHY_INFO_TYPE_EHT_TB: - case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: - case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: - return; - case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: - he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, - IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), - IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, - IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), - IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, - IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), - IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, - 
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), - IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); - fallthrough; - case IWL_RX_PHY_INFO_TYPE_HE_SU: - case IWL_RX_PHY_INFO_TYPE_HE_MU: - case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: - case IWL_RX_PHY_INFO_TYPE_HE_TB: - /* HE common */ - he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); - he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), - IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); - if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && - phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { - he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); - he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_UPLINK), - IEEE80211_RADIOTAP_HE_DATA3_UL_DL); - } - he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), - IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); - he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), - IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); - he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), - IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); - he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1, - IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); - he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), - IEEE80211_RADIOTAP_HE_DATA6_TXOP); - he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_DOPPLER), - IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); - break; - } + u32 rate_n_flags = phy_data->rate_n_flags; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u32 nsts; - switch (phy_data->info_type) { - case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: - case IWL_RX_PHY_INFO_TYPE_HE_MU: - case IWL_RX_PHY_INFO_TYPE_HE_SU: - he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), - IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); - break; - default: - /* nothing here */ - break; - } + switch (he_type) { + case RATE_MCS_HE_TYPE_TRIG: + iwl_mld_decode_he_tb_phy_data(phy_data, he, rx_status); + /* that's it, below is only for SU/MU */ + return; + case RATE_MCS_HE_TYPE_MU: + iwl_mld_decode_he_mu(phy_data, he, he_mu, rx_status); - switch (phy_data->info_type) { - case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: - he_mu->flags1 |= - le16_encode_bits(le16_get_bits(phy_data->data4, - IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); - he_mu->flags1 |= - le16_encode_bits(le16_get_bits(phy_data->data4, - IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); - he_mu->flags2 |= - le16_encode_bits(le16_get_bits(phy_data->data4, - IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); - iwl_mld_decode_he_mu_ext(phy_data, he_mu); - fallthrough; - case IWL_RX_PHY_INFO_TYPE_HE_MU: - he_mu->flags2 |= - le16_encode_bits(le32_get_bits(phy_data->data1, - IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), - 
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); - he_mu->flags2 |= - le16_encode_bits(le32_get_bits(phy_data->data1, - IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); - fallthrough; - case IWL_RX_PHY_INFO_TYPE_HE_TB: - case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: - iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status); + nsts = le32_get_bits(phy_data->ntfy->sigs.he.b, + OFDM_RX_FRAME_HE_SIGB_NSTS) + 1; break; - case IWL_RX_PHY_INFO_TYPE_HE_SU: + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); - he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_HE_BEAM_CHNG), - IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); - break; - default: - /* nothing */ + he->data3 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_BEAM_CHANGE, + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); + + nsts = le32_get_bits(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_NSTS) + 1; break; } + + rx_status->nss = nsts >> !!(rate_n_flags & RATE_MCS_STBC_MSK); + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + + he->data3 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_CODING_EXTRA_SYM, + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_PE_DISAMBIG, + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data5 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_MU_NUM_OF_LTF_SYM, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + he->data6 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_TXOP_DURATION, + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + he->data6 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a2, + OFDM_RX_FRAME_HE_DOPPLER, + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); + + he->data3 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_BSS_COLOR, + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + he->data3 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_UL_FLAG, + IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + he->data4 |= RTAP_ENC_HE(phy_data->ntfy->sigs.he.a1, + OFDM_RX_FRAME_HE_SPATIAL_REUSE, + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); } -static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb, - struct iwl_mld_rx_phy_data *phy_data, - int queue) +static void iwl_mld_rx_he(struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_he *he = NULL; @@ -510,48 +529,28 @@ static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb, .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; - u16 phy_info = phy_data->phy_info; he = skb_put_data(skb, &known, sizeof(known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE; - if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || - phy_data->info_type == 
IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { - he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); - rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; - } - - /* report the AMPDU-EOF bit on single frames */ - if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } - - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) - iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status, - queue); - - /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && - (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } - - if (he_type == RATE_MCS_HE_TYPE_EXT_SU && - rate_n_flags & RATE_MCS_HE_106T_MSK) { - rx_status->bw = RATE_INFO_BW_HE_RU; - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; - } - - /* actually data is filled in mac80211 */ - if (he_type == RATE_MCS_HE_TYPE_SU || - he_type == RATE_MCS_HE_TYPE_EXT_SU) + switch (he_type) { + case RATE_MCS_HE_TYPE_EXT_SU: + /* + * Except for this special case we won't have + * HE RU allocation info outside of monitor mode + * since we don't get the PHY notif. + */ + if (rate_n_flags & RATE_MCS_HE_106T_MSK) { + rx_status->bw = RATE_INFO_BW_HE_RU; + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + fallthrough; + case RATE_MCS_HE_TYPE_SU: + /* actual data is filled in mac80211 */ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + break; + } #define CHECK_TYPE(F) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ @@ -567,8 +566,7 @@ static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb, if (rate_n_flags & RATE_MCS_BF_MSK) he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); - switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> - RATE_MCS_HE_GI_LTF_POS) { + switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) { case 0: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; @@ -609,37 +607,52 @@ static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb, he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + + if (likely(!phy_data->ntfy)) + return; + + if (he_type == RATE_MCS_HE_TYPE_MU) { + he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; + } + + iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status); } static void iwl_mld_decode_lsig(struct sk_buff *skb, struct iwl_mld_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK; struct ieee80211_radiotap_lsig *lsig; + u32 lsig_len, rate; - switch (phy_data->info_type) { - case IWL_RX_PHY_INFO_TYPE_HT: - case IWL_RX_PHY_INFO_TYPE_VHT_SU: - case IWL_RX_PHY_INFO_TYPE_VHT_MU: - case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: - case IWL_RX_PHY_INFO_TYPE_HE_SU: - case IWL_RX_PHY_INFO_TYPE_HE_MU: - case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: - case IWL_RX_PHY_INFO_TYPE_HE_TB: - case IWL_RX_PHY_INFO_TYPE_EHT_MU: - case IWL_RX_PHY_INFO_TYPE_EHT_TB: - case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: - case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: - lsig = skb_put(skb, sizeof(*lsig)); - lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); 
- lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1, - IWL_RX_PHY_DATA1_LSIG_LEN_MASK), - IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); - rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; - break; - default: - break; - } + if (likely(!phy_data->ntfy)) + return; + + /* + * Technically legacy CCK/OFDM frames don't have an L-SIG + * since that's the compat format for HT (non-greenfield) + * and up. However, it's meant to be compatible with the + * LENGTH and RATE fields in Clause 17 and 18 OFDM frames + * so include the field for any non-CCK frame. For CCK it + * cannot work, since the LENGTH field for them is 16-bit + * and the radiotap field only has 12 bits. + */ + if (format == RATE_MCS_MOD_TYPE_CCK) + return; + + lsig_len = le32_get_bits(phy_data->ntfy->legacy_sig.ofdm, + OFDM_RX_LEGACY_LENGTH); + rate = le32_get_bits(phy_data->ntfy->legacy_sig.ofdm, OFDM_RX_RATE); + + lsig = skb_put(skb, sizeof(*lsig)); + lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN) | + cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN); + lsig->data2 = le16_encode_bits(lsig_len, + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH) | + le16_encode_bits(rate, IEEE80211_RADIOTAP_LSIG_DATA2_RATE); + rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; } /* Put a TLV on the skb and return data pointer @@ -667,209 +680,144 @@ iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len) (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \ } while (0) -#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ - eht->data[(rt_data)] |= \ - (cpu_to_le32 \ - (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \ - LE32_DEC_ENC(data ## fw_data, \ - IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \ - IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru)) +static void iwl_mld_decode_eht_usig_tb(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_eht_usig *usig) +{ + __le32 usig_a1 = phy_data->ntfy->sigs.eht_tb.usig_a1; + __le32 usig_a2 = phy_data->ntfy->sigs.eht_tb.usig_a2_eht; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + OFDM_RX_FRAME_EHT_USIG1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_TRIG_SPATIAL_REUSE_1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_TRIG_SPATIAL_REUSE_2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_TRIG_USIG2_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD); +} -#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ - __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) +static void iwl_mld_decode_eht_usig_non_tb(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_eht_usig *usig) +{ + __le32 usig_a1 = phy_data->ntfy->sigs.eht.usig_a1; + __le32 usig_a2 = phy_data->ntfy->sigs.eht.usig_a2_eht; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + OFDM_RX_FRAME_EHT_USIG1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + OFDM_RX_FRAME_EHT_USIG1_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, 
usig_a2, + OFDM_RX_FRAME_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_PUNC_CHANNEL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B8, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_SIG_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + OFDM_RX_FRAME_EHT_SIG_SYM_NUM, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); +} -#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1 -#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2 -#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2 -#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2 -#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3 -#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3 -#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3 -#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4 +static void iwl_mld_decode_eht_usig(struct iwl_mld_rx_phy_data *phy_data, + struct sk_buff *skb) +{ + u32 he_type = phy_data->rate_n_flags & RATE_MCS_HE_TYPE_MSK; + __le32 usig_a1 = phy_data->ntfy->sigs.eht.usig_a1; + __le32 usig_a2 = phy_data->ntfy->sigs.eht.usig_a2_eht; + struct ieee80211_radiotap_eht_usig *usig; + u32 bw; -#define IWL_RX_RU_DATA_A1 2 -#define IWL_RX_RU_DATA_A2 2 -#define IWL_RX_RU_DATA_B1 2 -#define IWL_RX_RU_DATA_B2 4 -#define IWL_RX_RU_DATA_C1 3 -#define IWL_RX_RU_DATA_C2 3 -#define IWL_RX_RU_DATA_D1 4 -#define IWL_RX_RU_DATA_D2 4 + usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, + sizeof(*usig)); -#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \ - _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \ - rt_ru, \ - IWL_RX_RU_DATA_ ## fw_ru, \ - fw_ru) + BUILD_BUG_ON(offsetof(union iwl_sigs, eht.usig_a1) != + offsetof(union iwl_sigs, eht_tb.usig_a1)); + BUILD_BUG_ON(offsetof(union iwl_sigs, eht.usig_a2_eht) != + offsetof(union iwl_sigs, eht_tb.usig_a2_eht)); -static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld, - struct iwl_mld_rx_phy_data *phy_data, - struct ieee80211_rx_status *rx_status, - struct ieee80211_radiotap_eht *eht, - struct ieee80211_radiotap_eht_usig *usig) -{ - if (phy_data->with_data) { - __le32 data1 = phy_data->data1; - __le32 data2 = phy_data->data2; - __le32 data3 = phy_data->data3; - __le32 data4 = phy_data->eht_data4; - __le32 data5 = phy_data->data5; - u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; - - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, - IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, - IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4, - IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); - IWL_MLD_ENC_USIG_VALUE_MASK - (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN); - eht->user_info[0] |= - cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) | 
- LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR, - IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID); +#define CHECK_BW(bw) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_ ## bw ## _VAL) + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); +#undef CHECK_BW + BUILD_BUG_ON(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 != + RATE_MCS_CHAN_WIDTH_320_VAL); + bw = u32_get_bits(phy_data->rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK); + /* specific handling for 320MHz-1/320MHz-2 */ + if (bw == RATE_MCS_CHAN_WIDTH_320_VAL) + bw += le32_get_bits(usig_a1, OFDM_RX_FRAME_EHT_BW320_SLOT); + usig->common |= le32_encode_bits(bw, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW); - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M); - eht->data[7] |= LE32_DEC_ENC - (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA, - IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + usig->common |= LE32_DEC_ENC(usig_a1, OFDM_RX_FRAME_ENHANCED_WIFI_UL_FLAG, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(usig_a1, OFDM_RX_FRAME_ENHANCED_WIFI_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); - /* - * Hardware labels the content channels/RU allocation values - * as follows: - * Content Channel 1 Content Channel 2 - * 20 MHz: A1 - * 40 MHz: A1 B1 - * 80 MHz: A1 C1 B1 D1 - * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2 - * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4 - * - * However firmware can only give us A1-D2, so the higher - * frequencies are missing. - */ + if (le32_get_bits(usig_a1, OFDM_RX_FRAME_EHT_USIG1_VALIDATE) && + le32_get_bits(usig_a2, OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B2) && + le32_get_bits(usig_a2, OFDM_RX_FRAME_EHT_USIG2_VALIDATE_B8)) + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK); - switch (phy_bw) { - case RATE_MCS_CHAN_WIDTH_320: - /* additional values are missing in RX metadata */ - fallthrough; - case RATE_MCS_CHAN_WIDTH_160: - /* content channel 1 */ - IWL_MLD_ENC_EHT_RU(1_2_1, A2); - IWL_MLD_ENC_EHT_RU(1_2_2, C2); - /* content channel 2 */ - IWL_MLD_ENC_EHT_RU(2_2_1, B2); - IWL_MLD_ENC_EHT_RU(2_2_2, D2); - fallthrough; - case RATE_MCS_CHAN_WIDTH_80: - /* content channel 1 */ - IWL_MLD_ENC_EHT_RU(1_1_2, C1); - /* content channel 2 */ - IWL_MLD_ENC_EHT_RU(2_1_2, D1); - fallthrough; - case RATE_MCS_CHAN_WIDTH_40: - /* content channel 2 */ - IWL_MLD_ENC_EHT_RU(2_1_1, B1); - fallthrough; - case RATE_MCS_CHAN_WIDTH_20: - IWL_MLD_ENC_EHT_RU(1_1_1, A1); - break; - } - } else { - __le32 usig_a1 = phy_data->rx_vec[0]; - __le32 usig_a2 = phy_data->rx_vec[1]; - - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, - IWL_RX_USIG_A1_DISREGARD, - IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, - IWL_RX_USIG_A1_VALIDATE, - IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_PPDU_TYPE, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_PUNC_CHANNEL, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_SIG_MCS, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); - 
IWL_MLD_ENC_USIG_VALUE_MASK - (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_CRC_OK, - IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC); - } -} + usig->common |= LE32_DEC_ENC(usig_a1, + OFDM_RX_FRAME_ENHANCED_WIFI_TXOP_DURATION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); -static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld, - struct iwl_mld_rx_phy_data *phy_data, - struct ieee80211_rx_status *rx_status, - struct ieee80211_radiotap_eht *eht, - struct ieee80211_radiotap_eht_usig *usig) -{ - if (phy_data->with_data) { - __le32 data5 = phy_data->data5; - - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, - IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, - IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); - - IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, - IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); - } else { - __le32 usig_a1 = phy_data->rx_vec[0]; - __le32 usig_a2 = phy_data->rx_vec[1]; - - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, - IWL_RX_USIG_A1_DISREGARD, - IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_PPDU_TYPE, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD); - IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, - IWL_RX_USIG_A2_EHT_CRC_OK, - IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC); - } + if (!le32_get_bits(usig_a2, OFDM_RX_USIG_CRC_OK)) + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN); + usig->common |= LE32_DEC_ENC(usig_a1, + OFDM_RX_FRAME_ENHANCED_WIFI_VER_ID, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER); + + if (he_type == RATE_MCS_HE_TYPE_TRIG) + iwl_mld_decode_eht_usig_tb(phy_data, usig); + else + iwl_mld_decode_eht_usig_non_tb(phy_data, usig); } -static void iwl_mld_decode_eht_ru(struct iwl_mld *mld, - struct ieee80211_rx_status *rx_status, - struct ieee80211_radiotap_eht *eht) +static void +iwl_mld_eht_set_ru_alloc(struct ieee80211_rx_status *rx_status, + u32 ru_with_p80) { - u32 ru = le32_get_bits(eht->data[8], - IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); enum nl80211_eht_ru_alloc nl_ru; + u32 ru = ru_with_p80 >> 1; - /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields - * in an EHT variant User Info field + /* + * HW always uses trigger frame format: + * + * Draft PIEEE802.11be D7.0 Table 9-46l - Encoding of the PS160 and + * RU Allocation subfields in an EHT variant User Info field */ switch (ru) { @@ -929,135 +877,228 @@ static void iwl_mld_decode_eht_ru(struct iwl_mld *mld, rx_status->eht.ru = nl_ru; } -static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld, - struct iwl_mld_rx_phy_data *phy_data, - 
struct ieee80211_rx_status *rx_status, - struct ieee80211_radiotap_eht *eht, - struct ieee80211_radiotap_eht_usig *usig) - +static void iwl_mld_decode_eht_tb(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) { - __le32 data0 = phy_data->data0; - __le32 data1 = phy_data->data1; - __le32 usig_a1 = phy_data->rx_vec[0]; - u8 info_type = phy_data->info_type; - - /* Not in EHT range */ - if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU || - info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT) + if (!(phy_data->ntfy->flags & IWL_SNIF_FLAG_VALID_TB_RX)) return; - usig->common |= cpu_to_le32 - (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | - IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN); - if (phy_data->with_data) { - usig->common |= LE32_DEC_ENC(data0, - IWL_RX_PHY_DATA0_EHT_UPLINK, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); - usig->common |= LE32_DEC_ENC(data0, - IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); - } else { - usig->common |= LE32_DEC_ENC(usig_a1, - IWL_RX_USIG_A1_UL_FLAG, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); - usig->common |= LE32_DEC_ENC(usig_a1, - IWL_RX_USIG_A1_BSS_COLOR, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); - } - - usig->common |= - cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED); - usig->common |= - LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK); - - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE); - eht->data[0] |= LE32_DEC_ENC(data0, - IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK, - IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT | + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80); - /* All RU allocating size/index is in TB format */ - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT); - eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160, + eht->data[8] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx0, + OFDM_UCODE_TRIG_BASE_PS160, IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160); - eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0, - IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0); - eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7, + eht->data[8] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_RU, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0 | IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_CODING_EXTRA_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_PE_DISAMBIG, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_NUM_OF_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + eht->data[1] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht_tb.tb_rx0, + OFDM_UCODE_TRIG_BASE_RX_RU_P80, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); - iwl_mld_decode_eht_ru(mld, 
rx_status, eht); - - /* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set - * which is on only in case of monitor mode so no need to check monitor - * mode - */ - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80); - eht->data[1] |= - le32_encode_bits(mld->monitor.p80, - IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); - - usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN); - if (phy_data->with_data) - usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); - else - usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + iwl_mld_eht_set_ru_alloc(rx_status, + le32_get_bits(phy_data->ntfy->sigs.eht_tb.tb_rx1, + OFDM_UCODE_TRIG_BASE_RX_RU)); +} - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM); - eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM, - IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); +static void iwl_mld_eht_decode_user_ru(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_eht *eht) +{ + u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM); - eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK, - IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + if (!(phy_data->ntfy->flags & IWL_SNIF_FLAG_VALID_RU)) + return; - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM); - eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG, - IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); +#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + eht->data[(rt_data)] |= \ + (cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \ + LE32_DEC_ENC(phy_data->ntfy->sigs.eht.cmn[fw_data], \ + OFDM_RX_FRAME_EHT_RU_ALLOC_ ## fw_data ## _ ## fw_ru, \ + IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru)) - /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */ +#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) - if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK)) - usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); +#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_3 4 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_3 4 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_4 5 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_4 5 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_5 5 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_5 6 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_6 6 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_6 6 + +#define IWL_RX_RU_DATA_A1 0 +#define IWL_RX_RU_DATA_A2 0 +#define IWL_RX_RU_DATA_A3 0 +#define IWL_RX_RU_DATA_A4 4 +#define IWL_RX_RU_DATA_B1 1 +#define IWL_RX_RU_DATA_B2 1 +#define IWL_RX_RU_DATA_B3 1 +#define IWL_RX_RU_DATA_B4 4 +#define IWL_RX_RU_DATA_C1 2 +#define IWL_RX_RU_DATA_C2 2 +#define IWL_RX_RU_DATA_C3 2 +#define IWL_RX_RU_DATA_C4 5 +#define IWL_RX_RU_DATA_D1 3 +#define IWL_RX_RU_DATA_D2 3 +#define IWL_RX_RU_DATA_D3 3 +#define IWL_RX_RU_DATA_D4 5 - usig->common |= 
cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN); - usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER, - IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER); +#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \ + _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \ + rt_ru, \ + IWL_RX_RU_DATA_ ## fw_ru, \ + fw_ru) /* - * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE, - * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS + * Hardware labels the content channels/RU allocation values + * as follows: + * + * Content Channel 1 Content Channel 2 + * 20 MHz: A1 + * 40 MHz: A1 B1 + * 80 MHz: A1 C1 B1 D1 + * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2 + * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4 */ - eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF); - eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM, + switch (phy_bw) { + case RATE_MCS_CHAN_WIDTH_320: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_2_3, A3); + IWL_MLD_ENC_EHT_RU(1_2_4, C3); + IWL_MLD_ENC_EHT_RU(1_2_5, A4); + IWL_MLD_ENC_EHT_RU(1_2_6, C4); + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_2_3, B3); + IWL_MLD_ENC_EHT_RU(2_2_4, D3); + IWL_MLD_ENC_EHT_RU(2_2_5, B4); + IWL_MLD_ENC_EHT_RU(2_2_6, D4); + fallthrough; + case RATE_MCS_CHAN_WIDTH_160: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_2_1, A2); + IWL_MLD_ENC_EHT_RU(1_2_2, C2); + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_2_1, B2); + IWL_MLD_ENC_EHT_RU(2_2_2, D2); + fallthrough; + case RATE_MCS_CHAN_WIDTH_80: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_1_2, C1); + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_1_2, D1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_40: + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_1_1, B1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_20: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_1_1, A1); + break; + } +} + +static void iwl_mld_decode_eht_non_tb(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) +{ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + /* All RU allocating size/index is in TB format */ + IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT | + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80 | + IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M); + + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + eht->data[8] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b2, + OFDM_RX_FRAME_EHT_STA_RU_PS160, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160); + eht->data[8] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b2, + OFDM_RX_FRAME_EHT_STA_RU, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0 | + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_CODING_EXTRA_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_PE_DISAMBIG, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + eht->data[0] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_NUM_OF_LTF_SYM, IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + eht->data[1] 
|= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b2, + OFDM_RX_FRAME_EHT_STA_RU_P80, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); + eht->data[7] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_NUM_OF_USERS, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + + iwl_mld_eht_decode_user_ru(phy_data, eht); + + iwl_mld_eht_set_ru_alloc(rx_status, + le32_get_bits(phy_data->ntfy->sigs.eht.b2, + OFDM_RX_FRAME_EHT_STA_RU)); +} - if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT || - info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB) - iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig); +static void iwl_mld_decode_eht_phy_data(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) +{ + u32 he_type = phy_data->rate_n_flags & RATE_MCS_HE_TYPE_MSK; - if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT || - info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU) - iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig); + if (he_type == RATE_MCS_HE_TYPE_TRIG) + iwl_mld_decode_eht_tb(phy_data, rx_status, eht); + else + iwl_mld_decode_eht_non_tb(phy_data, rx_status, eht); } static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb, - struct iwl_mld_rx_phy_data *phy_data, - int queue) + struct iwl_mld_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_eht *eht; - struct ieee80211_radiotap_eht_usig *usig; size_t eht_len = sizeof(*eht); - u32 rate_n_flags = phy_data->rate_n_flags; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; /* EHT and HE have the same values for LTF */ u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; - u16 phy_info = phy_data->phy_info; - u32 bw; /* u32 for 1 user_info */ if (phy_data->with_data) @@ -1065,50 +1106,7 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb, eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len); - usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, - sizeof(*usig)); rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; - usig->common |= - cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN); - - /* specific handling for 320MHz */ - bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK); - if (bw == RATE_MCS_CHAN_WIDTH_320_VAL) - bw += le32_get_bits(phy_data->data0, - IWL_RX_PHY_DATA0_EHT_BW320_SLOT); - - usig->common |= cpu_to_le32 - (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw)); - - /* report the AMPDU-EOF bit on single frames */ - if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->data0 & - cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } - - /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && - (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->data0 & - cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } - - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) - iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig); - -#define CHECK_TYPE(F) \ - BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ - (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) - - CHECK_TYPE(SU); - CHECK_TYPE(EXT_SU); - CHECK_TYPE(MU); - CHECK_TYPE(TRIG); switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) { case 0: @@ 
-1144,20 +1142,18 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb, if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) { eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); - eht->data[0] |= cpu_to_le32 - (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, - ltf) | - FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, - rx_status->eht.gi)); + eht->data[0] |= le32_encode_bits(ltf, + IEEE80211_RADIOTAP_EHT_DATA0_LTF) | + le32_encode_bits(rx_status->eht.gi, + IEEE80211_RADIOTAP_EHT_DATA0_GI); } if (!phy_data->with_data) { eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S); - eht->data[7] |= - le32_encode_bits(le32_get_bits(phy_data->rx_vec[2], - RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK), - IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + eht->data[7] |= LE32_DEC_ENC(phy_data->ntfy->sigs.eht.b1, + OFDM_RX_FRAME_EHT_NSTS, + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); if (rate_n_flags & RATE_MCS_BF_MSK) eht->data[7] |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); @@ -1177,14 +1173,28 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb, eht->user_info[0] |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); - eht->user_info[0] |= cpu_to_le32 - (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS, - u32_get_bits(rate_n_flags, - RATE_VHT_MCS_RATE_CODE_MSK)) | - FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O, - u32_get_bits(rate_n_flags, - RATE_MCS_NSS_MSK))); + eht->user_info[0] |= + le32_encode_bits(u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK), + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + le32_encode_bits(u32_get_bits(rate_n_flags, + RATE_MCS_NSS_MSK), + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O); } + + if (likely(!phy_data->ntfy)) + return; + + if (phy_data->with_data) { + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) | + LE32_DEC_ENC(phy_data->ntfy->sigs.eht.user_id, + OFDM_RX_FRAME_EHT_USER_FIELD_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID); + } + + iwl_mld_decode_eht_usig(phy_data, skb); + iwl_mld_decode_eht_phy_data(phy_data, rx_status, eht); } #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1207,8 +1217,9 @@ static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld, radiotap->oui[0] = 0xf6; radiotap->oui[1] = 0x54; radiotap->oui[2] = 0x25; - /* radiotap sniffer config sub-namespace */ + /* Intel OUI default radiotap subtype */ radiotap->oui_subtype = 1; + /* Sniffer config element type */ radiotap->vendor_type = 0; /* fill the data now */ @@ -1219,34 +1230,58 @@ static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld, } #endif -/* Note: hdr can be NULL */ -static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id, - struct ieee80211_hdr *hdr, - struct sk_buff *skb, - struct iwl_mld_rx_phy_data *phy_data, - int queue) +static void iwl_mld_add_rtap_sniffer_phy_data(struct iwl_mld *mld, + struct sk_buff *skb, + struct iwl_rx_phy_air_sniffer_ntfy *ntfy) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - u32 rate_n_flags = phy_data->rate_n_flags; - u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); - bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK; + struct ieee80211_radiotap_vendor_content *radiotap; + const u16 vendor_data_len = sizeof(*ntfy); - phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE; + radiotap = + iwl_mld_radiotap_put_tlv(skb, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE, + sizeof(*radiotap) + vendor_data_len); - if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) - 
phy_data->info_type = - le32_get_bits(phy_data->data1, - IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + /* Intel OUI */ + radiotap->oui[0] = 0xf6; + radiotap->oui[1] = 0x54; + radiotap->oui[2] = 0x25; + /* Intel OUI default radiotap subtype */ + radiotap->oui_subtype = 1; + /* PHY data element type */ + radiotap->vendor_type = cpu_to_le16(1); - /* set the preamble flag if appropriate */ - if (format == RATE_MCS_MOD_TYPE_CCK && - phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) - rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + /* fill the data now */ + memcpy(radiotap->data, ntfy, vendor_data_len); - iwl_mld_fill_signal(mld, link_id, hdr, rx_status, phy_data); + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; +} + +static void +iwl_mld_set_rx_nonlegacy_rate_info(u32 rate_n_flags, + struct ieee80211_rx_status *rx_status) +{ + u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); + + /* NSS may be overridden by PHY ntfy with full value */ + rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; +} + +static void iwl_mld_set_rx_rate(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status) +{ + u32 rate_n_flags = phy_data->rate_n_flags; + u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK; - /* This may be overridden by iwl_mld_rx_he() to HE_RU */ + /* bandwidth may be overridden to RU by PHY ntfy */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; @@ -1264,17 +1299,93 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id, break; } - /* must be before L-SIG data */ - if (format == RATE_MCS_MOD_TYPE_HE) - iwl_mld_rx_he(mld, skb, phy_data, queue); + switch (format) { + case RATE_MCS_MOD_TYPE_CCK: + if (phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + fallthrough; + case RATE_MCS_MOD_TYPE_LEGACY_OFDM: { + int rate = + iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + rx_status->band); - iwl_mld_decode_lsig(skb, phy_data); + /* override BW - it could be DUP and indicate the wrong BW */ + rx_status->bw = RATE_INFO_BW_20; + + /* valid rate */ + if (rate >= 0 && rate <= 0xFF) { + rx_status->rate_idx = rate; + break; + } + + /* invalid rate */ + rx_status->rate_idx = 0; + + /* + * In monitor mode we can see CCK frames on 5 or 6 GHz, usually + * just the (possibly malformed) PHY header by accident, since + * the decoder doesn't seem to turn off CCK. We cannot correctly + * encode the rate to mac80211 (and therefore not in radiotap) + * since we give the per-band index which doesn't cover those + * rates. 
+ */ + if (!mld->monitor.on && net_ratelimit()) + IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n", + rate_n_flags, rx_status->band); + break; + } + case RATE_MCS_MOD_TYPE_HT: + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + case RATE_MCS_MOD_TYPE_VHT: + rx_status->encoding = RX_ENC_VHT; + iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status); + break; + case RATE_MCS_MOD_TYPE_HE: + rx_status->encoding = RX_ENC_HE; + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status); + break; + case RATE_MCS_MOD_TYPE_EHT: + rx_status->encoding = RX_ENC_EHT; + iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status); + break; + default: + WARN_ON_ONCE(1); + } + + if (format != RATE_MCS_MOD_TYPE_CCK && is_sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; +} + +/* Note: hdr can be NULL */ +static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id, + struct ieee80211_hdr *hdr, + struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u32 rate_n_flags = phy_data->rate_n_flags; + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + + iwl_mld_fill_signal(mld, link_id, hdr, rx_status, phy_data); rx_status->device_timestamp = phy_data->gp2_on_air_rise; - /* using TLV format and must be after all fixed len fields */ + iwl_mld_set_rx_rate(mld, phy_data, rx_status); + + /* must be before L-SIG data (radiotap field order) */ + if (format == RATE_MCS_MOD_TYPE_HE) + iwl_mld_rx_he(skb, phy_data); + + iwl_mld_decode_lsig(skb, phy_data); + + /* TLVs - must be after radiotap fixed fields */ if (format == RATE_MCS_MOD_TYPE_EHT) - iwl_mld_rx_eht(mld, skb, phy_data, queue); + iwl_mld_rx_eht(mld, skb, phy_data); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mld->monitor.on)) { @@ -1282,9 +1393,9 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id, if (mld->monitor.ptp_time) { u64 adj_time = - iwl_mld_ptp_get_adj_time(mld, - phy_data->gp2_on_air_rise * - NSEC_PER_USEC); + iwl_mld_ptp_get_adj_time(mld, + phy_data->gp2_on_air_rise * + NSEC_PER_USEC); rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC); rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64; @@ -1293,56 +1404,8 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, int link_id, } #endif - if (format != RATE_MCS_MOD_TYPE_CCK && is_sgi) - rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - - switch (format) { - case RATE_MCS_MOD_TYPE_HT: - rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - break; - case RATE_MCS_MOD_TYPE_VHT: - case RATE_MCS_MOD_TYPE_HE: - case RATE_MCS_MOD_TYPE_EHT: - if (format == RATE_MCS_MOD_TYPE_VHT) { - rx_status->encoding = RX_ENC_VHT; - } else if (format == RATE_MCS_MOD_TYPE_HE) { - rx_status->encoding = RX_ENC_HE; - rx_status->he_dcm = - !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); - } else if (format == RATE_MCS_MOD_TYPE_EHT) { - rx_status->encoding = RX_ENC_EHT; - } - - rx_status->nss = u32_get_bits(rate_n_flags, - RATE_MCS_NSS_MSK) + 1; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - break; - default: { - int rate = - iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags, - 
rx_status->band); - - /* valid rate */ - if (rate >= 0 && rate <= 0xFF) { - rx_status->rate_idx = rate; - break; - } - - /* invalid rate */ - rx_status->rate_idx = 0; - - if (net_ratelimit()) - IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n", - rate_n_flags, rx_status->band); - break; - } - } + if (phy_data->ntfy) + iwl_mld_add_rtap_sniffer_phy_data(mld, skb, phy_data->ntfy); } /* iwl_mld_create_skb adds the rxb to a new skb */ @@ -1763,13 +1826,36 @@ static int iwl_mld_rx_crypto(struct iwl_mld *mld, return 0; } -static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld, - struct iwl_mld_rx_phy_data *phy_data, - struct ieee80211_rx_status *rx_status) +static void iwl_mld_rx_update_ampdu_data(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status) { + u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK; bool toggle_bit = phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + switch (format) { + case RATE_MCS_MOD_TYPE_CCK: + case RATE_MCS_MOD_TYPE_LEGACY_OFDM: + /* no aggregation possible */ + return; + case RATE_MCS_MOD_TYPE_HT: + case RATE_MCS_MOD_TYPE_VHT: + /* single frames are not A-MPDU format */ + if (!(phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU)) + return; + break; + default: + /* HE/EHT/UHR have A-MPDU format for single frames */ + if (!(phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->phy_info & IWL_RX_MPDU_PHY_EOF_INDICATION) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + return; + } + } + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; /* Toggle is switched whenever new aggregation starts. Make * sure ampdu_reference is never 0 so we can later use it to @@ -1781,6 +1867,11 @@ static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld, mld->monitor.ampdu_ref++; mld->monitor.ampdu_toggle = toggle_bit; phy_data->first_subframe = true; + + /* report EOF bit on the first subframe */ + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->phy_info & IWL_RX_MPDU_PHY_EOF_INDICATION) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } rx_status->ampdu_reference = mld->monitor.ampdu_ref; } @@ -1810,6 +1901,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, u32 mpdu_len; enum iwl_mld_reorder_result reorder_res; struct ieee80211_rx_status *rx_status; + unsigned int alloc_size = 128; if (unlikely(mld->fw_status.in_hw_restart)) return; @@ -1824,10 +1916,17 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, "FW lied about packet len (%d)\n", pkt_len)) return; + iwl_mld_fill_phy_data_from_mpdu(mld, mpdu_desc, &phy_data); + /* Don't use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. + * + * For monitor mode we need more space to include the full PHY + * notification data. */ - skb = alloc_skb(128, GFP_ATOMIC); + if (unlikely(mld->monitor.on) && phy_data.ntfy) + alloc_size += sizeof(struct iwl_rx_phy_air_sniffer_ntfy); + skb = alloc_skb(alloc_size, GFP_ATOMIC); if (!skb) { IWL_ERR(mld, "alloc_skb failed\n"); return; @@ -1835,8 +1934,6 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, hdr = (void *)(pkt->data + mpdu_desc_size); - iwl_mld_fill_phy_data(mld, mpdu_desc, &phy_data); - if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { /* If the device inserted padding it means that (it thought) * the 802.11 header wasn't a multiple of 4 bytes long. 
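The new iwl_mld_rx_update_ampdu_data() above derives a stable, never-zero ampdu_reference from a single toggle bit the firmware flips whenever a new aggregate starts, and reports the EOF bit on the first subframe. The toggle-tracking core in isolation — a sketch with illustrative names, omitting the per-modulation dispatch shown in the hunk:

```c
#include <linux/types.h>

struct ex_ampdu_state {
	u32 ref;	/* last reference reported; 0 stays reserved as invalid */
	bool toggle;	/* toggle bit seen on the previous frame */
};

/* Returns the A-MPDU reference for a frame carrying @toggle_bit and
 * flags via @first whether this frame opens a new aggregate.
 */
static u32 ex_ampdu_ref_update(struct ex_ampdu_state *st, bool toggle_bit,
			       bool *first)
{
	*first = (toggle_bit != st->toggle);
	if (*first) {
		st->toggle = toggle_bit;
		if (++st->ref == 0)	/* skip 0 on wraparound */
			st->ref = 1;
	}
	return st->ref;
}
```

HE and newer PPDUs always use the A-MPDU frame format, which is why the hunk reports EOF details even for single frames there, while HT/VHT single frames return early.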
In @@ -1861,9 +1958,8 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, if (drop) goto drop; - /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) - iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status); + if (unlikely(mld->monitor.on)) + iwl_mld_rx_update_ampdu_data(mld, &phy_data, rx_status); /* Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. @@ -1897,7 +1993,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, link_id = u8_get_bits(mpdu_desc->mac_phy_band, IWL_RX_MPDU_MAC_PHY_BAND_LINK_MASK); - iwl_mld_rx_fill_status(mld, link_id, hdr, skb, &phy_data, queue); + iwl_mld_rx_fill_status(mld, link_id, hdr, skb, &phy_data); if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue, le32_to_cpu(pkt->len_n_flags), &crypto_len)) @@ -2031,87 +2127,65 @@ void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld, wake_up(&mld->rxq_sync.waitq); } -void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, - struct iwl_rx_packet *pkt, int queue) +static void iwl_mld_no_data_rx(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_phy_air_sniffer_ntfy *ntfy) { - struct iwl_rx_no_data_ver_3 *desc; - struct iwl_mld_rx_phy_data phy_data; struct ieee80211_rx_status *rx_status; + struct iwl_mld_rx_phy_data phy_data = { + .ntfy = ntfy, + .phy_info = 0, /* short preamble set below */ + .rate_n_flags = le32_to_cpu(ntfy->rate), + .gp2_on_air_rise = le32_to_cpu(ntfy->on_air_rise_time), + .energy_a = ntfy->rssi_a, + .energy_b = ntfy->rssi_b, + }; + u32 format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; struct sk_buff *skb; - u32 format, rssi; - u8 channel; - - if (unlikely(mld->fw_status.in_hw_restart)) - return; - - if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc), - "Bad RX_NO_DATA_NOTIF size (%d)\n", - iwl_rx_packet_payload_len(pkt))) - return; - - desc = (void *)pkt->data; - - rssi = le32_to_cpu(desc->rssi); - channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK); - - phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK); - phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK); - phy_data.data0 = desc->phy_info[0]; - phy_data.data1 = desc->phy_info[1]; - phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; - phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); - phy_data.rate_n_flags = iwl_v3_rate_from_v2_v3(desc->rate, - mld->fw_rates_ver_3); - phy_data.with_data = false; - - BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec)); - memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec)); - format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - - /* Don't use dev_alloc_skb(), we'll have enough headroom once - * ieee80211_hdr pulled. 
- */ - skb = alloc_skb(128, GFP_ATOMIC); - if (!skb) { - IWL_ERR(mld, "alloc_skb failed\n"); + skb = alloc_skb(128 + sizeof(struct iwl_rx_phy_air_sniffer_ntfy), + GFP_ATOMIC); + if (!skb) return; - } rx_status = IEEE80211_SKB_RXCB(skb); /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; - /* mark as failed PLCP on any errors to skip checks in mac80211 */ - if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) != - RX_NO_DATA_INFO_ERR_NONE) - rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; - - switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) { - case RX_NO_DATA_INFO_TYPE_NDP: + switch (ntfy->status) { + case IWL_SNIF_STAT_PLCP_RX_OK: + /* we only get here with sounding PPDUs */ rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; break; - case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: - case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED: + case IWL_SNIF_STAT_AID_NOT_FOR_US: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; break; + case IWL_SNIF_STAT_PLCP_RX_LSIG_ERR: + case IWL_SNIF_STAT_PLCP_RX_SIGA_ERR: + case IWL_SNIF_STAT_PLCP_RX_SIGB_ERR: + case IWL_SNIF_STAT_UNKNOWN_ERROR: default: + rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; + fallthrough; + case IWL_SNIF_STAT_UNEXPECTED_TB: + case IWL_SNIF_STAT_UNSUPPORTED_RATE: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; - break; + /* we could include the real reason in a vendor TLV */ } - rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : - NL80211_BAND_2GHZ; + if (format == RATE_MCS_MOD_TYPE_CCK && + ntfy->legacy_sig.cck & cpu_to_le32(CCK_CRFR_SHORT_PREAMBLE)) + phy_data.phy_info |= IWL_RX_MPDU_PHY_SHORT_PREAMBLE; - rx_status->freq = ieee80211_channel_to_frequency(channel, - rx_status->band); + iwl_mld_fill_rx_status_band_freq(IEEE80211_SKB_RXCB(skb), + ntfy->band, ntfy->channel); /* link ID is ignored for NULL header */ - iwl_mld_rx_fill_status(mld, -1, NULL, skb, &phy_data, queue); + iwl_mld_rx_fill_status(mld, -1, NULL, skb, &phy_data); /* No more radiotap info should be added after this point. * Mark it as mac header for upper layers to know where @@ -2119,29 +2193,72 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, */ skb_set_mac_header(skb, skb->len); - /* Override the nss from the rx_vec since the rate_n_flags has - * only 1 bit for the nss which gives a max of 2 ss but there - * may be up to 8 spatial streams. 
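The status switch in the new iwl_mld_no_data_rx() above is worth a second look: decode failures both set RX_FLAG_FAILED_PLCP_CRC and fall through into the vendor zero-length-PSDU type, while frames the firmware understood but deliberately dropped get the vendor type without the CRC flag. A compressed sketch of that mapping, with placeholder status values standing in for the IWL_SNIF_STAT_* set:

```c
#include <linux/compiler.h>	/* fallthrough */
#include <linux/types.h>

enum ex_snif_status { EX_OK, EX_NOT_FOR_US, EX_PLCP_ERR, EX_UNEXPECTED_TB };
enum ex_psdu_type { EX_SOUNDING, EX_NOT_CAPTURED, EX_VENDOR };

static enum ex_psdu_type ex_classify(enum ex_snif_status s, bool *failed_plcp)
{
	switch (s) {
	case EX_OK:		/* only sounding PPDUs reach here */
		return EX_SOUNDING;
	case EX_NOT_FOR_US:	/* decoded fine, just not our AID */
		return EX_NOT_CAPTURED;
	case EX_PLCP_ERR:	/* decode error: mark bad PLCP ... */
	default:
		*failed_plcp = true;
		fallthrough;	/* ... and report as vendor type */
	case EX_UNEXPECTED_TB:	/* understood but intentionally dropped */
		return EX_VENDOR;
	}
}
```

Placing default: ahead of the last labels is what lets unknown firmware statuses inherit the error handling, exactly as the hunk does.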
- */ - switch (format) { + /* pass the packet to mac80211 */ + rcu_read_lock(); + ieee80211_rx_napi(mld->hw, NULL, skb, napi); + rcu_read_unlock(); +} + +void iwl_mld_handle_phy_air_sniffer_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt) +{ + struct iwl_rx_phy_air_sniffer_ntfy *ntfy = (void *)pkt->data; + bool is_ndp = false; + u32 he_type; + + if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*ntfy), + "invalid air sniffer notification size\n")) + return; + + /* check if there's an old one to release as errored */ + if (mld->monitor.phy.valid && !mld->monitor.phy.used) { + /* didn't capture data, so override status */ + mld->monitor.phy.data.status = IWL_SNIF_STAT_AID_NOT_FOR_US; + iwl_mld_no_data_rx(mld, napi, &mld->monitor.phy.data); + } + + /* old data is no longer valid now */ + mld->monitor.phy.valid = false; + + he_type = le32_to_cpu(ntfy->rate) & RATE_MCS_HE_TYPE_MSK; + + switch (le32_to_cpu(ntfy->rate) & RATE_MCS_MOD_TYPE_MSK) { + case RATE_MCS_MOD_TYPE_HT: + is_ndp = !le32_get_bits(ntfy->sigs.ht.a1, + OFDM_RX_FRAME_HT_LENGTH); + break; case RATE_MCS_MOD_TYPE_VHT: - rx_status->nss = - le32_get_bits(desc->rx_vec[0], - RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; + is_ndp = le32_get_bits(ntfy->sigs.vht.a0, + OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM_VALID) && + !le32_get_bits(ntfy->sigs.vht.a0, + OFDM_RX_FRAME_VHT_NUM_OF_DATA_SYM); break; case RATE_MCS_MOD_TYPE_HE: - rx_status->nss = - le32_get_bits(desc->rx_vec[0], - RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; + if (he_type == RATE_MCS_HE_TYPE_TRIG) + break; + is_ndp = le32_get_bits(ntfy->sigs.he.a3, + OFDM_RX_FRAME_HE_NUM_OF_DATA_SYM_VALID) && + !le32_get_bits(ntfy->sigs.he.a3, + OFDM_RX_FRAME_HE_NUM_OF_DATA_SYM); break; case RATE_MCS_MOD_TYPE_EHT: - rx_status->nss = - le32_get_bits(desc->rx_vec[2], - RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1; + if (he_type == RATE_MCS_HE_TYPE_TRIG) + break; + is_ndp = le32_get_bits(ntfy->sigs.eht.sig2, + OFDM_RX_FRAME_EHT_NUM_OF_DATA_SYM_VALID) && + !le32_get_bits(ntfy->sigs.eht.sig2, + OFDM_RX_FRAME_EHT_NUM_OF_DATA_SYM); + break; } - /* pass the packet to mac80211 */ - rcu_read_lock(); - ieee80211_rx_napi(mld->hw, NULL, skb, napi); - rcu_read_unlock(); + if (ntfy->status != IWL_SNIF_STAT_PLCP_RX_OK || is_ndp) { + iwl_mld_no_data_rx(mld, napi, ntfy); + return; + } + + /* hang on to it for the RX_MPDU data packet(s) */ + mld->monitor.phy.data = *ntfy; + mld->monitor.phy.valid = true; + mld->monitor.phy.used = false; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.h b/drivers/net/wireless/intel/iwlwifi/mld/rx.h index 2beabd7e70b1..09dddbd40f55 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.h @@ -66,7 +66,8 @@ void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld, struct sk_buff *skb, int queue, struct ieee80211_sta *sta); -void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, - struct iwl_rx_packet *pkt, int queue); +void iwl_mld_handle_phy_air_sniffer_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt); #endif /* __iwl_mld_agg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c index 5cdbfa29a202..61ecc33116cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c @@ -890,7 +890,7 @@ static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue, sizeof(queue_counter->per_link)); queue_counter->window_start_time = jiffies; - 
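iwl_mld_handle_phy_air_sniffer_notif() above introduces a stash-and-match flow: the PHY notification arrives first and is held so the following RX_MPDU(s) can attach it, and a previously stashed notification that no MPDU ever consumed is flushed as an errored no-data capture before being overwritten. The bookkeeping in isolation, with placeholder types standing in for the driver's monitor state:

```c
#include <linux/types.h>

struct ex_phy_ntfy { u32 rate; u32 status; };	/* placeholder payload */

struct ex_phy_stash {
	struct ex_phy_ntfy data;
	bool valid;	/* stash holds a live notification */
	bool used;	/* at least one MPDU consumed it */
};

static void ex_phy_stash_update(struct ex_phy_stash *st,
				const struct ex_phy_ntfy *ntfy,
				void (*flush_no_data)(const struct ex_phy_ntfy *))
{
	/* previous PPDU produced no MPDUs: report it before dropping it */
	if (st->valid && !st->used)
		flush_no_data(&st->data);

	st->data = *ntfy;	/* keep for the matching RX_MPDU(s) */
	st->valid = true;
	st->used = false;
}
```

The MPDU path would then set used on pickup; NDPs and errored PPDUs are dispatched to iwl_mld_no_data_rx() immediately rather than stashed, as the handler shows.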
IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n"); + IWL_DEBUG_EHT(mld, "MPDU counters are cleared\n"); } link_counter = &queue_counter->per_link[mld_link->fw_id]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 865f973f677d..edae13755ee6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -115,7 +115,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, if (version >= 6) { - struct iwl_alive_ntf_v6 *palive; + struct iwl_alive_ntf_v7 *palive; if (pkt_len < sizeof(*palive)) return false; @@ -214,17 +214,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, ~FW_ADDR_CACHE_CONTROL; if (umac_error_table) { - if (umac_error_table >= - mvm->trans->mac_cfg->base->min_umac_error_event_table) { - iwl_fw_umac_set_alive_err_table(mvm->trans, - umac_error_table); - } else { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - umac_error_table, - (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? - "Init" : "RT"); - } + iwl_fw_umac_set_alive_err_table(mvm->trans, + umac_error_table); } alive_data->valid = status == IWL_ALIVE_STATUS_OK; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index b1dca76b7141..380b6f8a53fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -102,9 +102,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mvm->csme_vif = vif; } - if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) - vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; - return 0; out_free_bf: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b515028adc8f..301d590fe0bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -2894,4 +2894,9 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool update); + +/* rate_n_flags conversion */ +u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver); +__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 5e7e2926be0c..4f4111055ddd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -202,17 +202,13 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, const struct cfg80211_chan_def *chandef, - const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic, u32 action) { int ret; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1); - if (ver < 5 || !ap || !ap->chan) - ap = NULL; - - if (ver >= 3 && ver <= 6) { + if (ver >= 3 && ver <= 4) { struct iwl_phy_context_cmd cmd = {}; /* Set the command header fields */ @@ -223,14 +219,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, 
chains_static, chains_dynamic); - if (ap) { - cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap); - cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap); - } - - if (ver == 6) - cmd.puncture_mask = cpu_to_le16(chandef->punctured); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); } else if (ver < 3) { @@ -284,7 +272,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap, + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_ADD); @@ -342,7 +330,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int ret; /* ... remove it here ...*/ - ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL, + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_REMOVE); if (ret) @@ -356,7 +344,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap, + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action); } @@ -376,7 +364,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT); - iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1, + iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1, FW_CTXT_ACTION_REMOVE); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 5802ed80a9ca..d1619a229d8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -4178,167 +4178,3 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, else return rs_drv_tx_protection(mvm, mvmsta, enable); } - -static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; - int idx; - bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); - int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; - int last = ofdm ? 
IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; - - for (idx = offset; idx < last; idx++) - if (iwl_fw_rate_idx_to_plcp(idx) == rate) - return idx - offset; - return IWL_RATE_INVALID; -} - -u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver) -{ - u32 rate_v3 = 0, rate_v1; - u32 dup = 0; - - if (rate_ver > 1) - return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3); - - rate_v1 = le32_to_cpu(rate); - if (rate_v1 == 0) - return rate_v1; - /* convert rate */ - if (rate_v1 & RATE_MCS_HT_MSK_V1) { - u32 nss; - - rate_v3 |= RATE_MCS_MOD_TYPE_HT; - rate_v3 |= - rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; - nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK); - rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); - } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || - rate_v1 & RATE_MCS_HE_MSK_V1) { - u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK); - - rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; - - rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); - - if (rate_v1 & RATE_MCS_HE_MSK_V1) { - u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; - u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; - u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> - RATE_MCS_HE_106T_POS_V1; - u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> - RATE_MCS_HE_GI_LTF_POS; - - if ((he_type_bits == RATE_MCS_HE_TYPE_SU || - he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && - he_gi_ltf == RATE_MCS_HE_SU_4_LTF) - /* the new rate have an additional bit to - * represent the value 4 rather then using SGI - * bit for this purpose - as it was done in the - * old rate - */ - he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> - RATE_MCS_SGI_POS_V1; - - rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; - rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS; - rate_v3 |= he_106t << RATE_MCS_HE_106T_POS; - rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; - rate_v3 |= RATE_MCS_MOD_TYPE_HE; - } else { - rate_v3 |= RATE_MCS_MOD_TYPE_VHT; - } - /* if legacy format */ - } else { - u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); - - if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) - legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? 
- IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; - - rate_v3 |= legacy_rate; - if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) - rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM; - } - - /* convert flags */ - if (rate_v1 & RATE_MCS_LDPC_MSK_V1) - rate_v3 |= RATE_MCS_LDPC_MSK; - rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | - (rate_v1 & RATE_MCS_ANT_AB_MSK) | - (rate_v1 & RATE_MCS_STBC_MSK) | - (rate_v1 & RATE_MCS_BF_MSK); - - dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; - if (dup) { - rate_v3 |= RATE_MCS_DUP_MSK; - rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS; - } - - if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && - (rate_v1 & RATE_MCS_SGI_MSK_V1)) - rate_v3 |= RATE_MCS_SGI_MSK; - - return rate_v3; -} - -__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver) -{ - u32 result = 0; - int rate_idx; - - if (rate_ver > 1) - return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2); - - switch (rate & RATE_MCS_MOD_TYPE_MSK) { - case RATE_MCS_MOD_TYPE_CCK: - result = RATE_MCS_CCK_MSK_V1; - fallthrough; - case RATE_MCS_MOD_TYPE_LEGACY_OFDM: - rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK); - if (!(result & RATE_MCS_CCK_MSK_V1)) - rate_idx += IWL_FIRST_OFDM_RATE; - result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx), - RATE_LEGACY_RATE_MSK_V1); - break; - case RATE_MCS_MOD_TYPE_HT: - result = RATE_MCS_HT_MSK_V1; - result |= u32_encode_bits(u32_get_bits(rate, - RATE_HT_MCS_CODE_MSK), - RATE_HT_MCS_RATE_CODE_MSK_V1); - result |= u32_encode_bits(u32_get_bits(rate, - RATE_MCS_NSS_MSK), - RATE_HT_MCS_MIMO2_MSK); - break; - case RATE_MCS_MOD_TYPE_VHT: - result = RATE_MCS_VHT_MSK_V1; - result |= u32_encode_bits(u32_get_bits(rate, - RATE_VHT_MCS_NSS_MSK), - RATE_MCS_CODE_MSK); - result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK), - RATE_VHT_MCS_NSS_MSK); - break; - case RATE_MCS_MOD_TYPE_HE: /* not generated */ - default: - WARN_ONCE(1, "bad modulation type %d\n", - u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK)); - return 0; - } - - if (rate & RATE_MCS_LDPC_MSK) - result |= RATE_MCS_LDPC_MSK_V1; - WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) > - RATE_MCS_CHAN_WIDTH_160_VAL); - result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) | - (rate & RATE_MCS_ANT_AB_MSK) | - (rate & RATE_MCS_STBC_MSK) | - (rate & RATE_MCS_BF_MSK); - - /* not handling DUP since we don't use it */ - WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK); - - if (rate & RATE_MCS_SGI_MSK) - result |= RATE_MCS_SGI_MSK_V1; - - return cpu_to_le32(result); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index dfb062b7c5c2..34c957bef6f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -425,9 +425,6 @@ void iwl_mvm_rate_control_unregister(void); struct iwl_mvm_sta; -u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver); -__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver); - int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 8c1bb3a7ffca..d0c0faae0122 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -519,6 +519,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, return; } rx_status->rate_idx = rate; + /* override BW - it could be DUP and indicate the wrong BW */ + rx_status->bw = RATE_INFO_BW_20; } #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index fa995e235d9b..1a6c1f8706e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1243,3 +1243,167 @@ bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif) return false; } + +static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return IWL_RATE_INVALID; +} + +u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver) +{ + u32 rate_v3 = 0, rate_v1; + u32 dup = 0; + + if (rate_ver > 1) + return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3); + + rate_v1 = le32_to_cpu(rate); + if (rate_v1 == 0) + return rate_v1; + /* convert rate */ + if (rate_v1 & RATE_MCS_HT_MSK_V1) { + u32 nss; + + rate_v3 |= RATE_MCS_MOD_TYPE_HT; + rate_v3 |= + rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; + nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK); + rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || + rate_v1 & RATE_MCS_HE_MSK_V1) { + u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK); + + rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; + + rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + + if (rate_v1 & RATE_MCS_HE_MSK_V1) { + u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; + u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> + RATE_MCS_HE_106T_POS_V1; + u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> + RATE_MCS_HE_GI_LTF_POS; + + if ((he_type_bits == RATE_MCS_HE_TYPE_SU || + he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && + he_gi_ltf == RATE_MCS_HE_SU_4_LTF) + /* the new rate have an additional bit to + * represent the value 4 rather then using SGI + * bit for this purpose - as it was done in the + * old rate + */ + he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> + RATE_MCS_SGI_POS_V1; + + rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; + rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS; + rate_v3 |= he_106t << RATE_MCS_HE_106T_POS; + rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; + rate_v3 |= RATE_MCS_MOD_TYPE_HE; + } else { + rate_v3 |= RATE_MCS_MOD_TYPE_VHT; + } + /* if legacy format */ + } else { + u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); + + if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) + legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? 
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; + + rate_v3 |= legacy_rate; + if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) + rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM; + } + + /* convert flags */ + if (rate_v1 & RATE_MCS_LDPC_MSK_V1) + rate_v3 |= RATE_MCS_LDPC_MSK; + rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | + (rate_v1 & RATE_MCS_ANT_AB_MSK) | + (rate_v1 & RATE_MCS_STBC_MSK) | + (rate_v1 & RATE_MCS_BF_MSK); + + dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; + if (dup) { + rate_v3 |= RATE_MCS_DUP_MSK; + rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS; + } + + if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && + (rate_v1 & RATE_MCS_SGI_MSK_V1)) + rate_v3 |= RATE_MCS_SGI_MSK; + + return rate_v3; +} + +__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver) +{ + u32 result = 0; + int rate_idx; + + if (rate_ver > 1) + return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2); + + switch (rate & RATE_MCS_MOD_TYPE_MSK) { + case RATE_MCS_MOD_TYPE_CCK: + result = RATE_MCS_CCK_MSK_V1; + fallthrough; + case RATE_MCS_MOD_TYPE_LEGACY_OFDM: + rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK); + if (!(result & RATE_MCS_CCK_MSK_V1)) + rate_idx += IWL_FIRST_OFDM_RATE; + result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx), + RATE_LEGACY_RATE_MSK_V1); + break; + case RATE_MCS_MOD_TYPE_HT: + result = RATE_MCS_HT_MSK_V1; + result |= u32_encode_bits(u32_get_bits(rate, + RATE_HT_MCS_CODE_MSK), + RATE_HT_MCS_RATE_CODE_MSK_V1); + result |= u32_encode_bits(u32_get_bits(rate, + RATE_MCS_NSS_MSK), + RATE_HT_MCS_MIMO2_MSK); + break; + case RATE_MCS_MOD_TYPE_VHT: + result = RATE_MCS_VHT_MSK_V1; + result |= u32_encode_bits(u32_get_bits(rate, + RATE_VHT_MCS_NSS_MSK), + RATE_MCS_CODE_MSK); + result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK), + RATE_VHT_MCS_NSS_MSK); + break; + case RATE_MCS_MOD_TYPE_HE: /* not generated */ + default: + WARN_ONCE(1, "bad modulation type %d\n", + u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK)); + return 0; + } + + if (rate & RATE_MCS_LDPC_MSK) + result |= RATE_MCS_LDPC_MSK_V1; + WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) > + RATE_MCS_CHAN_WIDTH_160_VAL); + result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) | + (rate & RATE_MCS_ANT_AB_MSK) | + (rate & RATE_MCS_STBC_MSK) | + (rate & RATE_MCS_BF_MSK); + + /* not handling DUP since we don't use it */ + WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK); + + if (rate & RATE_MCS_SGI_MSK) + result |= RATE_MCS_SGI_MSK_V1; + + return cpu_to_le32(result); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b21a4d8eb105..dc99e7ac4726 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1061,12 +1061,18 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { /* WH RF */ IWL_DEV_INFO(iwl_rf_wh, iwl_be211_name, RF_TYPE(WH)), + IWL_DEV_INFO(iwl_rf_wh_non_eht, iwl_ax221_name, RF_TYPE(WH), + SUBDEV(0x0514)), + IWL_DEV_INFO(iwl_rf_wh_non_eht, iwl_ax221_name, RF_TYPE(WH), + SUBDEV(0x4514)), IWL_DEV_INFO(iwl_rf_wh_160mhz, iwl_be213_name, RF_TYPE(WH), BW_LIMITED), /* PE RF */ IWL_DEV_INFO(iwl_rf_pe, iwl_bn201_name, RF_TYPE(PE)), - IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), SUBDEV(0x0524)), - IWL_DEV_INFO(iwl_rf_pe, iwl_be221_name, RF_TYPE(PE), SUBDEV(0x0324)), + IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), + SUBDEV_MASKED(0x0524, 0xFFF)), + IWL_DEV_INFO(iwl_rf_pe, iwl_bn203_name, RF_TYPE(PE), + SUBDEV_MASKED(0x0324, 0xFFF)), /* Killer */ IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775s_name, 
SUBDEV(0x1776)), diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c index 59307b5df441..164d060ec617 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c @@ -4218,6 +4218,15 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev, pdev->device, pdev->subsystem_device, info.hw_rev, info.hw_rf_id); +#if !IS_ENABLED(CONFIG_IWLMLD) + if (iwl_drv_is_wifi7_supported(iwl_trans)) { + IWL_ERR(iwl_trans, + "IWLMLD needs to be compiled to support this device\n"); + ret = -EOPNOTSUPP; + goto out_free_trans; + } +#endif + dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_RFID_TYPE(info.hw_rf_id), CSR_HW_RFID_IS_CDB(info.hw_rf_id), diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index c31bbd4e7a4a..6bf2ad18b009 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -265,6 +265,34 @@ static void devinfo_api_range(struct kunit *test) } } +static void devinfo_pci_ids_config(struct kunit *test) +{ + for (int i = 0; iwl_hw_card_ids[i].vendor; i++) { + const struct pci_device_id *s = &iwl_hw_card_ids[i]; + const struct iwl_dev_info *di; + + if (s->device == PCI_ANY_ID || s->subdevice == PCI_ANY_ID) + continue; + +#if IS_ENABLED(CONFIG_IWLMVM) || IS_ENABLED(CONFIG_IWLMLD) + /* + * The check below only works for old (pre-CNVI) devices. Most + * new have subdevice==ANY, so are already skipped, but for some + * Bz platform(s) we list all the RF PCI IDs. Skip those too. + */ + if (s->driver_data == (kernel_ulong_t)&iwl_bz_mac_cfg) + continue; +#endif + + di = iwl_pci_find_dev_info(s->device, s->subdevice, + 0, 0, 0, 0, true); + + KUNIT_EXPECT_PTR_NE_MSG(test, di, NULL, + "PCI ID %04x:%04x not found\n", + s->device, s->subdevice); + } +} + static struct kunit_case devinfo_test_cases[] = { KUNIT_CASE(devinfo_table_order), KUNIT_CASE(devinfo_discrete_match), @@ -276,6 +304,7 @@ static struct kunit_case devinfo_test_cases[] = { KUNIT_CASE(devinfo_pci_ids), KUNIT_CASE(devinfo_no_mac_cfg_dups), KUNIT_CASE(devinfo_api_range), + KUNIT_CASE(devinfo_pci_ids_config), {} }; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index b264ed0af923..65d0f805459c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -24,6 +24,7 @@ #include <linux/crc-ccitt.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/nvmem-consumer.h> #include <linux/slab.h> #include "rt2x00.h" @@ -10962,6 +10963,36 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); +int rt2800_read_eeprom_nvmem(struct rt2x00_dev *rt2x00dev) +{ + struct device_node *np = rt2x00dev->dev->of_node; + unsigned int len = rt2x00dev->ops->eeprom_size; + struct nvmem_cell *cell; + const void *data; + size_t retlen; + + cell = of_nvmem_cell_get(np, "eeprom"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + data = nvmem_cell_read(cell, &retlen); + nvmem_cell_put(cell); + + if (IS_ERR(data)) + return PTR_ERR(data); + + if (retlen != len) { + dev_err(rt2x00dev->dev, "invalid eeprom size, required: 0x%04x\n", len); + kfree(data); + return -EINVAL; + } + + memcpy(rt2x00dev->eeprom, data, len); + kfree(data); + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_read_eeprom_nvmem); + static u8 
rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev) { u16 word; @@ -11011,7 +11042,9 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. */ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - rt2x00lib_set_mac_address(rt2x00dev, mac); + retval = rt2x00lib_set_mac_address(rt2x00dev, mac); + if (retval) + return retval; word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 620a3d9872ce..a3c3a751f57e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -248,6 +248,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev); int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); +int rt2800_read_eeprom_nvmem(struct rt2x00_dev *rt2x00dev); + int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev); void rt2800_get_key_seq(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 14c45aba836f..4fa14bb573ad 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -278,6 +278,9 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; + if (!rt2800_read_eeprom_nvmem(rt2x00dev)) + return 0; + if (rt2800pci_efuse_detect(rt2x00dev)) retval = rt2800pci_read_eeprom_efuse(rt2x00dev); else diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index 8f510a84e7f1..5c29201b34c8 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -92,8 +92,12 @@ static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev, static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev) { - void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); + void __iomem *base_addr; + if (!rt2800_read_eeprom_nvmem(rt2x00dev)) + return 0; + + base_addr = ioremap(0x1F040000, EEPROM_SIZE); if (!base_addr) return -ENOMEM; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 09b9d1f9f793..665887e9b118 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1427,7 +1427,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, */ u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif); -void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); +int rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); /* * Interrupt context handlers. 
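rt2800_read_eeprom_nvmem() above follows the standard consumer flow for a DT-described nvmem cell: look the cell up by name, read it (which returns a freshly allocated buffer), validate the size, copy, and free. A trimmed, generic sketch of that flow, with the "eeprom" cell name taken from the hunk and everything else illustrative:

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Read an "eeprom" cell of exactly @len bytes into @buf. Returns 0 or a
 * negative errno - including -EPROBE_DEFER while the provider is not yet
 * bound, which callers should propagate.
 */
static int ex_read_eeprom_cell(struct device_node *np, void *buf, size_t len)
{
	struct nvmem_cell *cell;
	size_t retlen;
	void *data;

	cell = of_nvmem_cell_get(np, "eeprom");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	data = nvmem_cell_read(cell, &retlen);
	nvmem_cell_put(cell);		/* handle no longer needed */
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (retlen != len) {
		kfree(data);
		return -EINVAL;
	}

	memcpy(buf, data, len);
	kfree(data);	/* nvmem_cell_read() returns a kmalloc'd copy */
	return 0;
}
```

The rt2800pci/rt2800soc hunks above wire this in as the first choice and keep the efuse/MMIO paths as the fallback whenever the call fails (e.g. no such cell in the device tree).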
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index f8a6f9c968a1..778a478ab53a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -988,14 +988,20 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry, entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; } -void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) +int rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { - of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); + int ret; + + ret = of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); + if (ret == -EPROBE_DEFER) + return ret; if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr); } + + return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address); diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index 3b4ded2ac801..37232ee22037 100644 --- a/drivers/net/wireless/st/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c @@ -317,10 +317,12 @@ static int cw1200_bh_rx_helper(struct cw1200_common *priv, if (wsm_id & 0x0400) { int rc = wsm_release_tx_buffer(priv, 1); - if (WARN_ON(rc < 0)) + if (WARN_ON(rc < 0)) { + dev_kfree_skb(skb_rx); return rc; - else if (rc > 0) + } else if (rc > 0) { *tx = 1; + } } /* cw1200_wsm_rx takes care on SKB livetime */ diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 80fbf740fe6d..ac756318e8ea 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -272,7 +272,6 @@ static ssize_t radar_detection_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -312,7 +311,6 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -374,7 +372,6 @@ static ssize_t radar_debug_mode_write(struct file *file, wl->radar_debug_mode, 0); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index fa3a3f71dd15..9d73ba933a16 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -213,7 +213,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); free_vector: kfree(events_vector); diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index eb3d3f0e0b4d..bbfd2725215b 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -63,7 +63,6 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -113,7 +112,6 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value, chip_op = arg; chip_op(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } @@ -287,7 +285,6 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); } - 
pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -357,7 +354,6 @@ static ssize_t forced_ps_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, ps_mode); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -830,7 +826,6 @@ static ssize_t rx_streaming_interval_write(struct file *file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -886,7 +881,6 @@ static ssize_t rx_streaming_always_write(struct file *file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -934,7 +928,6 @@ static ssize_t beacon_filtering_write(struct file *file, ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -1015,7 +1008,6 @@ static ssize_t sleep_auth_write(struct file *file, goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -1090,7 +1082,6 @@ read_err: goto part_err; part_err: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); skip_read: @@ -1172,7 +1163,6 @@ write_err: goto part_err; part_err: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); skip_write: @@ -1247,7 +1237,6 @@ static ssize_t fw_logger_write(struct file *file, ret = wl12xx_cmd_config_fwlog(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 6116a8522d96..12f0167d7380 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -154,7 +154,6 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -181,7 +180,6 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -234,7 +232,6 @@ static void wlcore_rc_update_work(struct work_struct *work) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -711,7 +708,6 @@ static int wlcore_irq_locked(struct wl1271 *wl) } err_ret: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -1047,7 +1043,6 @@ static void wl1271_recovery_work(struct work_struct *work) } wlcore_op_stop_locked(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); ieee80211_restart_hw(wl->hw); @@ -1943,7 +1938,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -2131,7 +2125,6 @@ static void wlcore_channel_switch_work(struct work_struct *work) wl12xx_cmd_stop_channel_switch(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -2201,7 +2194,6 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, NULL, false); - 
pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -2694,7 +2686,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, else wl->sta_count++; out: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -2774,7 +2765,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, } } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } deinit: @@ -3200,7 +3190,6 @@ static int wl1271_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3315,7 +3304,6 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, */ out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3531,7 +3519,6 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_wake_queues: @@ -3695,7 +3682,6 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_unlock: @@ -3724,7 +3710,6 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3772,7 +3757,6 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3823,7 +3807,6 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, ieee80211_scan_completed(wl->hw, &info); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3860,7 +3843,6 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, wl->sched_vif = wlvif; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3887,7 +3869,6 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, wl->ops->sched_scan_stop(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3916,7 +3897,6 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3948,7 +3928,6 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -4714,7 +4693,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, else wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -4779,7 +4757,6 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, } } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4828,7 +4805,6 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = true; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: 
mutex_unlock(&wl->mutex); @@ -4871,7 +4847,6 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = false; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4941,7 +4916,6 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, goto out_sleep; } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4995,7 +4969,6 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 0, 0); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5029,7 +5002,6 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5342,7 +5314,6 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5467,7 +5438,6 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5511,7 +5481,6 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } out: @@ -5566,7 +5535,6 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5645,7 +5613,6 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5699,7 +5666,6 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, msecs_to_jiffies(duration)); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5748,7 +5714,6 @@ static int wlcore_roc_completed(struct wl1271 *wl) ret = __wlcore_roc_completed(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5839,7 +5804,6 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, sinfo->signal = rssi_dbm; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index b414305acc32..f6dc54c1dbad 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -69,7 +69,6 @@ void wl1271_scan_complete_work(struct work_struct *work) wlcore_cmd_regdomain_config_locked(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); ieee80211_scan_completed(wl->hw, &info); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index 65ca5dc569a0..5ab6c1683675 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -58,7 +58,6 @@ static ssize_t bt_coex_state_store(struct device *dev, goto out; wl1271_acx_sg_enable(wl, wl->sg_enabled); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git 
a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index fc8ea58bc165..7c0cb1b7fef0 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -127,7 +127,6 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -192,7 +191,6 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) out_free: kfree(cmd); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 464587d16ab2..f76087be2f75 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -863,7 +863,6 @@ void wl1271_tx_work(struct work_struct *work) goto out; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index e4269e2b0098..5bb9eb300f97 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -60,7 +60,6 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, ret = wlcore_smart_config_start(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -92,7 +91,6 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, ret = wlcore_smart_config_stop(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -140,7 +138,6 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 5903d82e1ab1..551f5eb4e747 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -5799,6 +5799,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, NO_AUTO_VIF); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PUNCT); for (i = 0; i < ARRAY_SIZE(data->link_data); i++) { hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon, diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c index 64dab8b57611..6a5b22589af4 100644 --- a/drivers/net/wwan/qcom_bam_dmux.c +++ b/drivers/net/wwan/qcom_bam_dmux.c @@ -162,7 +162,6 @@ static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma) struct bam_dmux *dmux = skb_dma->dmux; unsigned long flags; - pm_runtime_mark_last_busy(dmux->dev); pm_runtime_put_autosuspend(dmux->dev); if (skb_dma->addr) @@ -397,7 +396,6 @@ static void bam_dmux_tx_wakeup_work(struct work_struct *work) dma_async_issue_pending(dmux->tx); out: - pm_runtime_mark_last_busy(dmux->dev); pm_runtime_put_autosuspend(dmux->dev); } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 97163e1e5783..689c920ca898 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ 
b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -250,7 +250,6 @@ static void t7xx_cldma_rx_done(struct work_struct *work) t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); - pm_runtime_mark_last_busy(md_ctrl->dev); pm_runtime_put_autosuspend(md_ctrl->dev); } @@ -362,7 +361,6 @@ static void t7xx_cldma_tx_done(struct work_struct *work) } spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); - pm_runtime_mark_last_busy(md_ctrl->dev); pm_runtime_put_autosuspend(md_ctrl->dev); } @@ -987,7 +985,6 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb allow_sleep: t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); - pm_runtime_mark_last_busy(md_ctrl->dev); pm_runtime_put_autosuspend(md_ctrl->dev); return ret; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index 2310493203d3..b76bea6ab2d7 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -877,7 +877,6 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget) t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); - pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); atomic_set(&rxq->rx_processing, 0); } else { @@ -1078,7 +1077,6 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work) } t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); - pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c index 8dab025a088a..236d632cf591 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -185,7 +185,6 @@ static void t7xx_dpmaif_tx_done(struct work_struct *work) } t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); - pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } @@ -468,7 +467,6 @@ static int t7xx_dpmaif_tx_hw_push_thread(void *arg) t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); t7xx_do_tx_hw_push(dpmaif_ctrl); t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); - pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a11a0e949400..7c2220366623 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2696,8 +2696,9 @@ static int __init netif_init(void) pr_info("Initialising Xen virtual ethernet driver\n"); - /* Allow as many queues as there are CPUs inut max. 8 if user has not - * specified a value. + /* Allow the number of queues to match the number of CPUs, but not exceed + * the maximum limit. If the user has not specified a value, the default + * maximum limit is 8. */ if (xennet_max_queues == 0) xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, |
num_online_cpus());
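The long run of pm_runtime_mark_last_busy() deletions above — across wl18xx/wlcore, qcom_bam_dmux, and the t7xx WWAN driver — tracks a runtime-PM core change: pm_runtime_put_autosuspend() now updates the last-busy timestamp itself, making the explicit call immediately before it redundant (as I read the transition, the historical non-marking behaviour was retained under a double-underscore variant). Before and after, as a sketch:

```c
#include <linux/pm_runtime.h>

/* before: refresh the timestamp by hand, then drop the usage count */
static void ex_put_old(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

/* after: one call marks the device busy and drops the usage count,
 * arming the autosuspend timer from "now"
 */
static void ex_put_new(struct device *dev)
{
	pm_runtime_put_autosuspend(dev);
}
```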