Diffstat (limited to 'drivers/net')
299 files changed, 13450 insertions, 4211 deletions
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 734a0b3242a9..ed86537b2f61 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -979,7 +979,7 @@ static void amt_event_send_request(struct amt_dev *amt)
 	amt->req_cnt++;
 out:
 	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
-	mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+	mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
 }
 
 static void amt_req_work(struct work_struct *work)
@@ -1046,7 +1046,8 @@ static bool amt_send_membership_update(struct amt_dev *amt,
 			    amt->gw_port,
 			    amt->relay_port,
 			    false,
-			    false);
+			    false,
+			    0);
 	amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
 	return false;
 }
@@ -1103,7 +1104,8 @@ static void amt_send_multicast_data(struct amt_dev *amt,
 			    amt->relay_port,
 			    tunnel->source_port,
 			    false,
-			    false);
+			    false,
+			    0);
 }
 
 static bool amt_send_membership_query(struct amt_dev *amt,
@@ -1161,7 +1163,8 @@ static bool amt_send_membership_query(struct amt_dev *amt,
 			    amt->relay_port,
 			    tunnel->source_port,
 			    false,
-			    false);
+			    false,
+			    0);
 	amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
 	return false;
 }
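The amt.c hunk above replaces msecs_to_jiffies(exp * 1000) with secs_to_jiffies(exp), which avoids the intermediate millisecond multiply. A minimal userspace model of the equivalence, assuming HZ=250 and a hypothetical 120 s cap standing in for AMT_MAX_REQ_TIMEOUT (both are stand-ins, not values from this patch):

#include <stdio.h>

#define HZ 250UL	/* assumption: a common CONFIG_HZ value */

/* simplified models of the kernel helpers, ignoring rounding details */
static unsigned long msecs_to_jiffies(unsigned long m) { return m * HZ / 1000; }
static unsigned long secs_to_jiffies(unsigned long s)  { return s * HZ; }

int main(void)
{
	for (unsigned long req_cnt = 1; req_cnt <= 4; req_cnt++) {
		unsigned long exp = 1UL << req_cnt;	/* backoff as in amt_event_send_request() */

		if (exp > 120)				/* hypothetical AMT_MAX_REQ_TIMEOUT */
			exp = 120;
		printf("req_cnt=%lu exp=%lus old=%lu new=%lu jiffies\n",
		       req_cnt, exp, msecs_to_jiffies(exp * 1000), secs_to_jiffies(exp));
	}
	return 0;
}

Both columns print the same jiffy count, which is why the conversion is behavior-preserving.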
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a9dffdcac805..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -362,8 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
 			    tos, ttl, df, sport, bareudp->port,
 			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
-			    !test_bit(IP_TUNNEL_CSUM_BIT,
-				      info->key.tun_flags));
+			    !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+			    0);
 	return 0;
 
 free_dst:
@@ -431,7 +431,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 			     &saddr, &daddr, prio, ttl, info->key.label,
 			     sport, bareudp->port,
 			     !test_bit(IP_TUNNEL_CSUM_BIT,
-				       info->key.tun_flags));
+				       info->key.tun_flags),
+			     0);
 	return 0;
 
 free_dst:
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 3809c148fb88..a94bd67c670c 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -179,7 +179,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
 	if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
 		return;
 
-	*ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+	*ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
 
 	/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
 	 * delay compensation" (TDC) is only applicable if data BRP is
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index a36842ace084..13826e8a707b 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -18,7 +18,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
 	[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
 	[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
 	[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
-	[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+	[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
 	[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
 	[IFLA_CAN_TDC] = { .type = NLA_NESTED },
 	[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
@@ -67,12 +67,12 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
 
 	if (data[IFLA_CAN_CTRLMODE]) {
 		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-		u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+		u32 tdc_flags = cm->flags & CAN_CTRLMODE_FD_TDC_MASK;
 
 		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
 
 		/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
-		if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+		if (tdc_flags == CAN_CTRLMODE_FD_TDC_MASK)
 			return -EOPNOTSUPP;
 		/* If one of the CAN_CTRLMODE_TDC_* flag is set then
 		 * TDC must be set and vice-versa
@@ -144,7 +144,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
 	const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
 	int err;
 
-	if (!tdc_const || !can_tdc_is_enabled(priv))
+	if (!tdc_const || !can_fd_tdc_is_enabled(priv))
 		return -EOPNOTSUPP;
 
 	err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
@@ -189,7 +189,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
 			  struct netlink_ext_ack *extack)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	u32 tdc_mask = 0;
+	bool fd_tdc_flag_provided = false;
 	int err;
 
 	/* We need synchronization with dev->stop() */
@@ -230,16 +230,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
 				dev->mtu = CAN_MTU;
 				memset(&priv->fd.data_bittiming, 0,
 				       sizeof(priv->fd.data_bittiming));
-				priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+				priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
 				memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
 			}
 
-			tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
+			fd_tdc_flag_provided = cm->mask & CAN_CTRLMODE_FD_TDC_MASK;
 			/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
 			 * exclusive: make sure to turn the other one off
 			 */
-			if (tdc_mask)
-				priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
+			if (fd_tdc_flag_provided)
+				priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_FD_TDC_MASK;
 		}
 
 	if (data[IFLA_CAN_BITTIMING]) {
@@ -339,10 +339,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
 			err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
 						 extack);
 			if (err) {
-				priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+				priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
 				return err;
 			}
-		} else if (!tdc_mask) {
+		} else if (!fd_tdc_flag_provided) {
 			/* Neither of TDC parameters nor TDC flags are
 			 * provided: do calculation
 			 */
@@ -409,7 +409,7 @@ static size_t can_tdc_get_size(const struct net_device *dev)
 		size += nla_total_size(sizeof(u32));	/* IFLA_CAN_TDCF_MAX */
 	}
 
-	if (can_tdc_is_enabled(priv)) {
+	if (can_fd_tdc_is_enabled(priv)) {
 		if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
 		    priv->fd.do_get_auto_tdcv)
 			size += nla_total_size(sizeof(u32));	/* IFLA_CAN_TDCV */
@@ -490,7 +490,7 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	     nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
 		goto err_cancel;
 
-	if (can_tdc_is_enabled(priv)) {
+	if (can_fd_tdc_is_enabled(priv)) {
 		u32 tdcv;
 		int err = -EINVAL;
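The can_validate() hunk keeps the trick that the renamed mask makes explicit: the mask is the OR of the two TDC mode flags, so tdc_flags can only equal the full mask when AUTO and MANUAL are both set, which is the invalid combination. A small standalone check of that logic (flag values taken from linux/can/netlink.h to the best of my knowledge; treat them as assumptions):

#include <assert.h>

#define CAN_CTRLMODE_TDC_AUTO	0x200	/* assumed UAPI value */
#define CAN_CTRLMODE_TDC_MANUAL	0x400	/* assumed UAPI value */
#define CAN_CTRLMODE_FD_TDC_MASK (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)

/* returns nonzero when the TDC flags are an acceptable combination */
static int tdc_flags_valid(unsigned int flags)
{
	unsigned int tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;

	return tdc_flags != CAN_CTRLMODE_FD_TDC_MASK;	/* both set -> invalid */
}

int main(void)
{
	assert(tdc_flags_valid(0));
	assert(tdc_flags_valid(CAN_CTRLMODE_TDC_AUTO));
	assert(tdc_flags_valid(CAN_CTRLMODE_TDC_MANUAL));
	assert(!tdc_flags_valid(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL));
	return 0;
}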
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index e5c162f8c589..8edaa339d590 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,10 +411,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	priv = cdev_to_priv(mcan_class);
 
 	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
-	if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
-		ret = -EPROBE_DEFER;
-		goto out_m_can_class_free_dev;
-	} else {
+	if (IS_ERR(priv->power)) {
+		if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out_m_can_class_free_dev;
+		}
 		priv->power = NULL;
 	}
 
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 7f10213738e5..1e559c0ff038 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -191,9 +191,19 @@
 /* RSCFDnCFDCmFDCFG */
 #define RCANFD_GEN4_FDCFG_CLOE		BIT(30)
 #define RCANFD_GEN4_FDCFG_FDOE		BIT(28)
+#define RCANFD_FDCFG_TDCO		GENMASK(23, 16)
 #define RCANFD_FDCFG_TDCE		BIT(9)
 #define RCANFD_FDCFG_TDCOC		BIT(8)
-#define RCANFD_FDCFG_TDCO(x)		(((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC		GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC		GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF		BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS		GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO		BIT(9)
+#define RCANFD_FDSTS_EOCO		BIT(8)
+#define RCANFD_FDSTS_TDCVF		BIT(7)
+#define RCANFD_FDSTS_TDCR		GENMASK(7, 0)
 
 /* RSCFDnCFDRFCCx */
 #define RCANFD_RFCC_RFIM		BIT(12)
@@ -425,19 +435,10 @@
 #define RCANFD_C_RPGACC(r)		(0x1900 + (0x04 * (r)))
 
 /* R-Car Gen4 Classical and CAN FD mode specific register map */
-#define RCANFD_GEN4_FDCFG(m)		(0x1404 + (0x20 * (m)))
-
 #define RCANFD_GEN4_GAFL_OFFSET		(0x1800)
 
 /* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m)		((gpriv)->info->regs->f_dcfg + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m)		(0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m)		(0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m)		(0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m)		(0x0510 + (0x20 * (m)))
-
 /* RSCFDnCFDGAFLXXXj offset */
 #define RCANFD_F_GAFL_OFFSET		(0x1000)
@@ -510,7 +511,7 @@ struct rcar_canfd_regs {
 	u16 cfcc;	/* Common FIFO Configuration/Control Register */
 	u16 cfsts;	/* Common FIFO Status Register */
 	u16 cfpctr;	/* Common FIFO Pointer Control Register */
-	u16 f_dcfg;	/* Global FD Configuration Register */
+	u16 coffset;	/* Channel Data Bitrate Configuration Register */
 	u16 rfoffset;	/* Receive FIFO buffer access ID register */
 	u16 cfoffset;	/* Transmit/receive FIFO buffer access ID register */
 };
@@ -529,6 +530,7 @@ struct rcar_canfd_shift_data {
 struct rcar_canfd_hw_info {
 	const struct can_bittiming_const *nom_bittiming;
 	const struct can_bittiming_const *data_bittiming;
+	const struct can_tdc_const *tdc_const;
 	const struct rcar_canfd_regs *regs;
 	const struct rcar_canfd_shift_data *sh;
 	u8 rnc_field_width;
@@ -636,12 +638,31 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
 	.brp_inc = 1,
 };
 
+/* CAN FD Transmission Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+	.tdcv_min = 1,
+	.tdcv_max = 128,
+	.tdco_min = 1,
+	.tdco_max = 128,
+	.tdcf_min = 0,	/* Filter window not supported */
+	.tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+	.tdcv_min = 1,
+	.tdcv_max = 256,
+	.tdco_min = 1,
+	.tdco_max = 256,
+	.tdcf_min = 0,	/* Filter window not supported */
+	.tdcf_max = 0,
+};
+
 static const struct rcar_canfd_regs rcar_gen3_regs = {
 	.rfcc = 0x00b8,
 	.cfcc = 0x0118,
 	.cfsts = 0x0178,
 	.cfpctr = 0x01d8,
-	.f_dcfg = 0x0500,
+	.coffset = 0x0500,
 	.rfoffset = 0x3000,
 	.cfoffset = 0x3400,
 };
@@ -651,7 +672,7 @@ static const struct rcar_canfd_regs rcar_gen4_regs = {
 	.cfcc = 0x0120,
 	.cfsts = 0x01e0,
 	.cfpctr = 0x0240,
-	.f_dcfg = 0x1400,
+	.coffset = 0x1400,
 	.rfoffset = 0x6000,
 	.cfoffset = 0x6400,
 };
@@ -681,6 +702,7 @@ static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
 static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
 	.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
 	.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+	.tdc_const = &rcar_canfd_gen3_tdc_const,
 	.regs = &rcar_gen3_regs,
 	.sh = &rcar_gen3_shift_data,
 	.rnc_field_width = 8,
@@ -697,6 +719,7 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
 static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
 	.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
 	.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+	.tdc_const = &rcar_canfd_gen4_tdc_const,
 	.regs = &rcar_gen4_regs,
 	.sh = &rcar_gen4_shift_data,
 	.rnc_field_width = 16,
@@ -713,6 +736,7 @@ static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
 static const struct rcar_canfd_hw_info rzg2l_hw_info = {
 	.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
 	.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+	.tdc_const = &rcar_canfd_gen3_tdc_const,
 	.regs = &rcar_gen3_regs,
 	.sh = &rcar_gen3_shift_data,
 	.rnc_field_width = 8,
@@ -729,6 +753,7 @@ static const struct rcar_canfd_hw_info rzg2l_hw_info = {
 static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
 	.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
 	.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+	.tdc_const = &rcar_canfd_gen4_tdc_const,
 	.regs = &rcar_gen4_regs,
 	.sh = &rcar_gen4_shift_data,
 	.rnc_field_width = 16,
@@ -781,23 +806,54 @@ static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
 static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
 				struct canfd_frame *cf, u32 off)
 {
+	u32 *data = (u32 *)cf->data;
 	u32 i, lwords;
 
 	lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
 	for (i = 0; i < lwords; i++)
-		*((u32 *)cf->data + i) =
-			rcar_canfd_read(priv->base, off + i * sizeof(u32));
+		data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
 }
 
 static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
 				struct canfd_frame *cf, u32 off)
 {
+	const u32 *data = (u32 *)cf->data;
 	u32 i, lwords;
 
 	lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
 	for (i = 0; i < lwords; i++)
-		rcar_canfd_write(priv->base, off + i * sizeof(u32),
-				 *((u32 *)cf->data + i));
+		rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
+}
+
+/* RSCFDnCFDCmXXX -> rcar_canfd_f_xxx(gpriv, ch) */
+static inline unsigned int rcar_canfd_f_dcfg(struct rcar_canfd_global *gpriv,
+					     unsigned int ch)
+{
+	return gpriv->info->regs->coffset + 0x00 + 0x20 * ch;
+}
+
+static inline unsigned int rcar_canfd_f_cfdcfg(struct rcar_canfd_global *gpriv,
+					       unsigned int ch)
+{
+	return gpriv->info->regs->coffset + 0x04 + 0x20 * ch;
+}
+
+static inline unsigned int rcar_canfd_f_cfdctr(struct rcar_canfd_global *gpriv,
+					       unsigned int ch)
+{
+	return gpriv->info->regs->coffset + 0x08 + 0x20 * ch;
+}
+
+static inline unsigned int rcar_canfd_f_cfdsts(struct rcar_canfd_global *gpriv,
+					       unsigned int ch)
+{
+	return gpriv->info->regs->coffset + 0x0c + 0x20 * ch;
+}
+
+static inline unsigned int rcar_canfd_f_cfdcrc(struct rcar_canfd_global *gpriv,
+					       unsigned int ch)
+{
+	return gpriv->info->regs->coffset + 0x10 + 0x20 * ch;
 }
 
 static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -808,8 +864,8 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
 		can_free_echo_skb(ndev, i, NULL);
 }
 
-static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch,
-			      unsigned int num_rules)
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+			       unsigned int num_rules)
 {
 	unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
 	unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
@@ -827,8 +883,8 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
 
 		for_each_set_bit(ch, &gpriv->channels_mask,
 				 gpriv->info->max_channels)
-			rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
-					   val);
+			rcar_canfd_set_bit(gpriv->base,
+					   rcar_canfd_f_cfdcfg(gpriv, ch), val);
 	} else {
 		if (gpriv->fdmode)
 			rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -841,6 +897,7 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
 
 static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
 {
+	struct device *dev = &gpriv->pdev->dev;
 	u32 sts, ch;
 	int err;
 
@@ -850,7 +907,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
 	err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
 				 !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
 	if (err) {
-		dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+		dev_dbg(dev, "global raminit failed\n");
 		return err;
 	}
 
@@ -863,7 +920,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
 	err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
 				 (sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
 	if (err) {
-		dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+		dev_dbg(dev, "global reset failed\n");
 		return err;
 	}
 
@@ -887,8 +944,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
 					 (sts & RCANFD_CSTS_CRSTSTS),
 					 2, 500000);
 		if (err) {
-			dev_dbg(&gpriv->pdev->dev,
-				"channel %u reset failed\n", ch);
+			dev_dbg(dev, "channel %u reset failed\n", ch);
 			return err;
 		}
 	}
@@ -938,7 +994,7 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
 			   RCANFD_GAFLECTR_AFLDAE));
 
 	/* Write number of rules for channel */
-	rcar_canfd_setrnc(gpriv, ch, num_rules);
+	rcar_canfd_set_rnc(gpriv, ch, num_rules);
 	if (gpriv->info->shared_can_regs)
 		offset = RCANFD_GEN4_GAFL_OFFSET;
 	else if (gpriv->fdmode)
@@ -1436,14 +1492,17 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
 {
-	struct rcar_canfd_channel *priv = netdev_priv(dev);
+	u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+	struct rcar_canfd_channel *priv = netdev_priv(ndev);
 	struct rcar_canfd_global *gpriv = priv->gpriv;
 	const struct can_bittiming *bt = &priv->can.bittiming;
 	const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+	const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+	const struct can_tdc *tdc = &priv->can.fd.tdc;
+	u32 cfg, tdcmode = 0, tdco = 0;
 	u16 brp, sjw, tseg1, tseg2;
-	u32 cfg;
 	u32 ch = priv->channel;
 
 	/* Nominal bit timing settings */
@@ -1452,46 +1511,43 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
 	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
 	tseg2 = bt->phase_seg2 - 1;
 
-	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
-		/* CAN FD only mode */
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
 		cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
 		       RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+	} else {
+		cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
+		       RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+	}
 
-		rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
-		netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
-			   brp, sjw, tseg1, tseg2);
-
-		/* Data bit timing settings */
-		brp = dbt->brp - 1;
-		sjw = dbt->sjw - 1;
-		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
-		tseg2 = dbt->phase_seg2 - 1;
-
-		cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
-		       RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+	rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
 
-		rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
-		netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
-			   brp, sjw, tseg1, tseg2);
-	} else {
-		/* Classical CAN only mode */
-		if (gpriv->info->shared_can_regs) {
-			cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
-			       RCANFD_NCFG_NBRP(brp) |
-			       RCANFD_NCFG_NSJW(gpriv, sjw) |
-			       RCANFD_NCFG_NTSEG2(gpriv, tseg2));
-		} else {
-			cfg = (RCANFD_CFG_TSEG1(tseg1) |
-			       RCANFD_CFG_BRP(brp) |
-			       RCANFD_CFG_SJW(sjw) |
-			       RCANFD_CFG_TSEG2(tseg2));
-		}
+	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+		return;
 
-		rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
-		netdev_dbg(priv->ndev,
-			   "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
-			   brp, sjw, tseg1, tseg2);
+	/* Data bit timing settings */
+	brp = dbt->brp - 1;
+	sjw = dbt->sjw - 1;
+	tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+	tseg2 = dbt->phase_seg2 - 1;
+
+	cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+	       RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+
+	rcar_canfd_write(priv->base, rcar_canfd_f_dcfg(gpriv, ch), cfg);
+
+	/* Transceiver Delay Compensation */
+	if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+		/* TDC enabled, measured + offset */
+		tdcmode = RCANFD_FDCFG_TDCE;
+		tdco = tdc->tdco - 1;
+	} else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+		/* TDC enabled, offset only */
+		tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+		tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
 	}
+
+	rcar_canfd_update_bit(gpriv->base, rcar_canfd_f_cfdcfg(gpriv, ch), mask,
+			      tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
 }
 
 static int rcar_canfd_start(struct net_device *ndev)
@@ -1691,7 +1747,8 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
 
 static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
 {
-	struct net_device_stats *stats = &priv->ndev->stats;
+	struct net_device *ndev = priv->ndev;
+	struct net_device_stats *stats = &ndev->stats;
 	struct rcar_canfd_global *gpriv = priv->gpriv;
 	struct canfd_frame *cf;
 	struct sk_buff *skb;
@@ -1707,14 +1764,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
 
 		if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
 		    sts & RCANFD_RFFDSTS_RFFDF)
-			skb = alloc_canfd_skb(priv->ndev, &cf);
+			skb = alloc_canfd_skb(ndev, &cf);
 		else
-			skb = alloc_can_skb(priv->ndev,
-					    (struct can_frame **)&cf);
+			skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
 	} else {
 		id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
 		dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
-		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
 	}
 
 	if (!skb) {
@@ -1735,7 +1791,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
 
 		if (sts & RCANFD_RFFDSTS_RFESI) {
 			cf->flags |= CANFD_ESI;
-			netdev_dbg(priv->ndev, "ESI Error\n");
+			netdev_dbg(ndev, "ESI Error\n");
 		}
 
 		if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1802,6 +1858,29 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 	return num_pkts;
 }
 
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+					unsigned int ch)
+{
+	u32 sts = rcar_canfd_read(gpriv->base, rcar_canfd_f_cfdsts(gpriv, ch));
+	u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+	return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+	struct rcar_canfd_channel *priv = netdev_priv(ndev);
+	u32 tdco = priv->can.fd.tdc.tdco;
+	u32 tdcr;
+
+	/* Transceiver Delay Compensation Result */
+	tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
+	*tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+	return 0;
+}
+
 static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
 {
 	int err;
@@ -1818,10 +1897,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
 	}
 }
 
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
 				       struct can_berr_counter *bec)
 {
-	struct rcar_canfd_channel *priv = netdev_priv(dev);
+	struct rcar_canfd_channel *priv = netdev_priv(ndev);
 	u32 val, ch = priv->channel;
 
 	/* Peripheral clock is already enabled in probe */
@@ -1924,12 +2003,17 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
 	if (gpriv->fdmode) {
 		priv->can.bittiming_const = gpriv->info->nom_bittiming;
 		priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+		priv->can.fd.tdc_const = gpriv->info->tdc_const;
 
 		/* Controller starts in CAN FD only mode */
 		err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
 		if (err)
 			goto fail;
-		priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+
+		priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+					       CAN_CTRLMODE_TDC_AUTO |
+					       CAN_CTRLMODE_TDC_MANUAL;
+		priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
 	} else {
 		/* Controller starts in Classical CAN only mode */
 		priv->can.bittiming_const = &rcar_canfd_bittiming_const;
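The rcar_canfd changes replace the old shift macro with a GENMASK(23, 16) field and pack the transmitter delay offset via FIELD_PREP, clamping the manual-mode value to tdco_max. A small userspace model of that packing (GENMASK and FIELD_PREP are re-implemented locally only so the example runs outside the kernel; the input values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define RCANFD_FDCFG_TDCO GENMASK(23, 16)

/* minimal FIELD_PREP: shift value to the mask's lowest set bit */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t tdcv = 10, tdco = 7, tdco_max = 128;	/* example values */
	/* CAN_CTRLMODE_TDC_MANUAL case from the patch: clamp, then 0-based */
	uint32_t hw_tdco = min_u32(tdcv + tdco, tdco_max) - 1;

	printf("FDCFG TDCO field: 0x%08x\n", field_prep(RCANFD_FDCFG_TDCO, hw_tdco));
	return 0;
}

With tdcv=10 and tdco=7 this prints 0x00100000, i.e. 16 placed in bits 23:16.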
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index ec5c64006a16..5a95877b7419 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -388,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
 	mcp251x_spi_write(spi, 4);
 }
 
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
-			       u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+			      u8 mask, u8 val)
 {
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
@@ -398,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
 	priv->spi_tx_buf[2] = mask;
 	priv->spi_tx_buf[3] = val;
 
-	mcp251x_spi_write(spi, 4);
+	return mcp251x_spi_write(spi, 4);
 }
 
 static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -441,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
 				unsigned int offset)
 {
 	struct mcp251x_priv *priv = gpiochip_get_data(chip);
+	int ret;
 	u8 val;
 
 	/* nothing to be done for inputs */
@@ -450,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
 	val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
 
 	mutex_lock(&priv->mcp_lock);
-	mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+	ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
 	mutex_unlock(&priv->mcp_lock);
+	if (ret)
+		return ret;
 
 	priv->reg_bfpctrl |= val;
 
@@ -530,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
 	return 0;
 }
 
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
-			     int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			    int value)
 {
 	struct mcp251x_priv *priv = gpiochip_get_data(chip);
 	u8 mask, val;
+	int ret;
 
 	mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
 	val = value ? mask : 0;
 
 	mutex_lock(&priv->mcp_lock);
-	mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+	ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
 	mutex_unlock(&priv->mcp_lock);
+	if (ret)
+		return ret;
 
 	priv->reg_bfpctrl &= ~mask;
 	priv->reg_bfpctrl |= val;
+
+	return 0;
 }
 
-static void
+static int
 mcp251x_gpio_set_multiple(struct gpio_chip *chip,
 			  unsigned long *maskp, unsigned long *bitsp)
 {
 	struct mcp251x_priv *priv = gpiochip_get_data(chip);
 	u8 mask, val;
+	int ret;
 
 	mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
 	mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -561,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
 	val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
 
 	if (!mask)
-		return;
+		return 0;
 
 	mutex_lock(&priv->mcp_lock);
-	mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+	ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
 	mutex_unlock(&priv->mcp_lock);
+	if (ret)
+		return ret;
 
 	priv->reg_bfpctrl &= ~mask;
 	priv->reg_bfpctrl |= val;
+
+	return 0;
 }
 
 static void mcp251x_gpio_restore(struct spi_device *spi)
@@ -594,8 +607,8 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
 	gpio->get_direction = mcp251x_gpio_get_direction;
 	gpio->get = mcp251x_gpio_get;
 	gpio->get_multiple = mcp251x_gpio_get_multiple;
-	gpio->set = mcp251x_gpio_set;
-	gpio->set_multiple = mcp251x_gpio_set_multiple;
+	gpio->set_rv = mcp251x_gpio_set;
+	gpio->set_multiple_rv = mcp251x_gpio_set_multiple;
 	gpio->base = -1;
 	gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
 	gpio->names = mcp251x_gpio_names;
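The mcp251x, mt7530, and vsc73xx hunks in this series all follow the same gpiolib migration: the void setters become int-returning and are registered through the transitional .set_rv/.set_multiple_rv members so bus errors propagate to gpiolib. A kernel-only sketch of the pattern, not taken from any of these drivers; mydev_write() is a hypothetical bus accessor:

#include <linux/gpio/driver.h>

/* hypothetical bus accessor; a real driver would do a regmap/SPI write */
static int mydev_write(struct gpio_chip *gc, unsigned int off, int val)
{
	return 0;
}

static int mydev_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	return mydev_write(gc, offset, value);	/* propagate the error */
}

static void mydev_gpio_setup(struct gpio_chip *gc)
{
	gc->set_rv = mydev_gpio_set;	/* int-returning variant of .set */
}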
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index d924b053677b..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -429,7 +429,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
 	es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
 				   &priv->can.fd.data_bittiming);
 
-	if (can_tdc_is_enabled(&priv->can)) {
+	if (can_fd_tdc_is_enabled(&priv->can)) {
 		tx_conf_msg.tdc_enabled = 1;
 		tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
 		tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f2e378199ab..81baec8eb1e5 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -515,7 +515,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
 		/* Setting Baud Rate prescaler value in F_BRPR Register */
 		btr0 = dbt->brp - 1;
-		if (can_tdc_is_enabled(&priv->can)) {
+		if (can_fd_tdc_is_enabled(&priv->can)) {
 			if (priv->devtype.cantype == XAXI_CANFD)
 				btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
 					XCAN_BRPR_TDC_ENABLE;
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index ebaa4a80d544..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -5,6 +5,7 @@ menuconfig B53
 	select NET_DSA_TAG_NONE
 	select NET_DSA_TAG_BRCM
 	select NET_DSA_TAG_BRCM_LEGACY
+	select NET_DSA_TAG_BRCM_LEGACY_FCS
 	select NET_DSA_TAG_BRCM_PREPEND
 	help
 	  This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index dc2f4adac9bc..46978757c972 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -361,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
 
-	/* Include IMP port in dumb forwarding mode
-	 */
-	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
-	mgmt |= B53_MII_DUMB_FWDG_EN;
-	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
-
-	/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
-	 * frames should be flooded or not.
-	 */
-	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
-	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
-	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+	if (!is5325(dev)) {
+		/* Include IMP port in dumb forwarding mode */
+		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+		mgmt |= B53_MII_DUMB_FWDG_EN;
+		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+
+		/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+		 * frames should be flooded or not.
+		 */
+		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+		mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+	} else {
+		b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+		mgmt |= B53_IP_MCAST_25;
+		b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+	}
 }
 
 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -487,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
 {
 	unsigned int i;
 
+	if (is5325(dev))
+		return 0;
+
 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
 		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
 
@@ -511,6 +519,9 @@ out:
 
 static int b53_fast_age_port(struct b53_device *dev, int port)
 {
+	if (is5325(dev))
+		return 0;
+
 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
 
 	return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -518,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
 
 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
 {
+	if (is5325(dev))
+		return 0;
+
 	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
 
 	return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -529,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
 	unsigned int i;
 	u16 pvlan;
 
+	/* BCM5325 CPU port is at 8 */
+	if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+		cpu_port = B53_CPU_PORT;
+
 	/* Enable the IMP port to be in the same VLAN as the other ports
 	 * on a per-port basis such that we only have Port i and IMP in
 	 * the same VLAN.
@@ -546,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
 {
 	u16 uc;
 
-	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
-	if (unicast)
-		uc |= BIT(port);
-	else
-		uc &= ~BIT(port);
-	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+	if (is5325(dev)) {
+		if (port == B53_CPU_PORT_25)
+			port = B53_CPU_PORT;
+
+		b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+		if (unicast)
+			uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+		else
+			uc &= ~BIT(port);
+		b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+	} else {
+		b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+		if (unicast)
+			uc |= BIT(port);
+		else
+			uc &= ~BIT(port);
+		b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+	}
 }
 
 static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -559,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
 {
 	u16 mc;
 
-	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
-	if (multicast)
-		mc |= BIT(port);
-	else
-		mc &= ~BIT(port);
-	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+	if (is5325(dev)) {
+		if (port == B53_CPU_PORT_25)
+			port = B53_CPU_PORT;
 
-	b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
-	if (multicast)
-		mc |= BIT(port);
-	else
-		mc &= ~BIT(port);
-	b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+		b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+		if (multicast)
+			mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+		else
+			mc &= ~BIT(port);
+		b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+	} else {
+		b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+		if (multicast)
+			mc |= BIT(port);
+		else
+			mc &= ~BIT(port);
+		b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+		b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+		if (multicast)
+			mc |= BIT(port);
+		else
+			mc &= ~BIT(port);
+		b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+	}
 }
 
 static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -579,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
 {
 	u16 reg;
 
+	if (is5325(dev))
+		return;
+
 	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
 	if (learning)
 		reg &= ~BIT(port);
@@ -615,6 +660,19 @@ int b53_setup_port(struct dsa_switch *ds, int port)
 	if (dsa_is_user_port(ds, port))
 		b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
 
+	if (is5325(dev) &&
+	    in_range(port, 1, 4)) {
+		u8 reg;
+
+		b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+		reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+		if (dsa_is_unused_port(ds, port))
+			reg |= PD_MODE_POWER_DOWN_PORT(port);
+		else
+			reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+		b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(b53_setup_port);
@@ -713,6 +771,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
 		hdr_ctl |= GC_FRM_MGMT_PORT_M;
 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
 
+	/* B53_BRCM_HDR not present on devices with legacy tags */
+	if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+	    dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+		return;
+
 	/* Enable Broadcom tags for IMP port */
 	b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
 	if (tag_en)
@@ -1257,6 +1320,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
 	if (port == dev->imp_port) {
 		off = B53_PORT_OVERRIDE_CTRL;
 		val = PORT_OVERRIDE_EN;
+	} else if (is5325(dev)) {
+		return;
 	} else {
 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
 		val = GMII_PO_EN;
@@ -1281,6 +1346,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
 	if (port == dev->imp_port) {
 		off = B53_PORT_OVERRIDE_CTRL;
 		val = PORT_OVERRIDE_EN;
+	} else if (is5325(dev)) {
+		return;
 	} else {
 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
 		val = GMII_PO_EN;
@@ -1311,10 +1378,19 @@ static void b53_force_port_config(struct b53_device *dev, int port,
 		return;
 	}
 
-	if (rx_pause)
-		reg |= PORT_OVERRIDE_RX_FLOW;
-	if (tx_pause)
-		reg |= PORT_OVERRIDE_TX_FLOW;
+	if (rx_pause) {
+		if (is5325(dev))
+			reg |= PORT_OVERRIDE_LP_FLOW_25;
+		else
+			reg |= PORT_OVERRIDE_RX_FLOW;
+	}
+
+	if (tx_pause) {
+		if (is5325(dev))
+			reg |= PORT_OVERRIDE_LP_FLOW_25;
+		else
+			reg |= PORT_OVERRIDE_TX_FLOW;
+	}
 
 	b53_write8(dev, B53_CTRL_PAGE, off, reg);
 }
@@ -1764,6 +1840,45 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 	return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
 }
 
+static int b53_arl_read_25(struct b53_device *dev, u64 mac,
+			   u16 vid, struct b53_arl_entry *ent, u8 *idx)
+{
+	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
+	unsigned int i;
+	int ret;
+
+	ret = b53_arl_op_wait(dev);
+	if (ret)
+		return ret;
+
+	bitmap_zero(free_bins, dev->num_arl_bins);
+
+	/* Read the bins */
+	for (i = 0; i < dev->num_arl_bins; i++) {
+		u64 mac_vid;
+
+		b53_read64(dev, B53_ARLIO_PAGE,
+			   B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+
+		b53_arl_to_entry_25(ent, mac_vid);
+
+		if (!(mac_vid & ARLTBL_VALID_25)) {
+			set_bit(i, free_bins);
+			continue;
+		}
+		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+			continue;
+		if (dev->vlan_enabled &&
+		    ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid)
+			continue;
+		*idx = i;
+		return 0;
+	}
+
+	*idx = find_first_bit(free_bins, dev->num_arl_bins);
+	return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
+}
+
 static int b53_arl_op(struct b53_device *dev, int op, int port,
 		      const unsigned char *addr, u16 vid, bool is_valid)
 {
@@ -1778,14 +1893,18 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
 
 	/* Perform a read for the given MAC and VID */
 	b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
-	b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+	if (!is5325m(dev))
+		b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
 
 	/* Issue a read operation for this MAC */
 	ret = b53_arl_rw_op(dev, 1);
 	if (ret)
 		return ret;
 
-	ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+	if (is5325(dev) || is5365(dev))
+		ret = b53_arl_read_25(dev, mac, vid, &ent, &idx);
+	else
+		ret = b53_arl_read(dev, mac, vid, &ent, &idx);
 
 	/* If this is a read, just finish now */
 	if (op)
@@ -1829,12 +1948,17 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
 	ent.is_static = true;
 	ent.is_age = false;
 	memcpy(ent.mac, addr, ETH_ALEN);
-	b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+	if (is5325(dev) || is5365(dev))
+		b53_arl_from_entry_25(&mac_vid, &ent);
+	else
+		b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
 
 	b53_write64(dev, B53_ARLIO_PAGE,
 		    B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
-	b53_write32(dev, B53_ARLIO_PAGE,
-		    B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+	if (!is5325(dev) && !is5365(dev))
+		b53_write32(dev, B53_ARLIO_PAGE,
+			    B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
 
 	return b53_arl_rw_op(dev, 0);
 }
@@ -1846,12 +1970,6 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
 	struct b53_device *priv = ds->priv;
 	int ret;
 
-	/* 5325 and 5365 require some more massaging, but could
-	 * be supported eventually
-	 */
-	if (is5325(priv) || is5365(priv))
-		return -EOPNOTSUPP;
-
 	mutex_lock(&priv->arl_mutex);
 	ret = b53_arl_op(priv, 0, port, addr, vid, true);
 	mutex_unlock(&priv->arl_mutex);
@@ -1878,10 +1996,15 @@ EXPORT_SYMBOL(b53_fdb_del);
 
 static int b53_arl_search_wait(struct b53_device *dev)
 {
 	unsigned int timeout = 1000;
-	u8 reg;
+	u8 reg, offset;
+
+	if (is5325(dev) || is5365(dev))
+		offset = B53_ARL_SRCH_CTL_25;
+	else
+		offset = B53_ARL_SRCH_CTL;
 
 	do {
-		b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+		b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
 		if (!(reg & ARL_SRCH_STDN))
 			return 0;
 
@@ -1898,13 +2021,24 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
 			      struct b53_arl_entry *ent)
 {
 	u64 mac_vid;
-	u32 fwd_entry;
 
-	b53_read64(dev, B53_ARLIO_PAGE,
-		   B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
-	b53_read32(dev, B53_ARLIO_PAGE,
-		   B53_ARL_SRCH_RSTL(idx), &fwd_entry);
-	b53_arl_to_entry(ent, mac_vid, fwd_entry);
+	if (is5325(dev)) {
+		b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+			   &mac_vid);
+		b53_arl_to_entry_25(ent, mac_vid);
+	} else if (is5365(dev)) {
+		b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65,
+			   &mac_vid);
+		b53_arl_to_entry_25(ent, mac_vid);
+	} else {
+		u32 fwd_entry;
+
+		b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+			   &mac_vid);
+		b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+			   &fwd_entry);
+		b53_arl_to_entry(ent, mac_vid, fwd_entry);
+	}
 }
 
 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
@@ -1925,14 +2059,20 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
 	struct b53_device *priv = ds->priv;
 	struct b53_arl_entry results[2];
 	unsigned int count = 0;
+	u8 offset;
 	int ret;
 	u8 reg;
 
 	mutex_lock(&priv->arl_mutex);
 
+	if (is5325(priv) || is5365(priv))
+		offset = B53_ARL_SRCH_CTL_25;
+	else
+		offset = B53_ARL_SRCH_CTL;
+
 	/* Start search operation */
 	reg = ARL_SRCH_STDN;
-	b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+	b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
 
 	do {
 		ret = b53_arl_search_wait(priv);
@@ -2165,7 +2305,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
 		     struct switchdev_brport_flags flags,
 		     struct netlink_ext_ack *extack)
 {
-	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+	struct b53_device *dev = ds->priv;
+	unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD);
+
+	if (!is5325(dev))
+		mask |= BR_LEARNING;
+
+	if (flags.mask & ~mask)
 		return -EINVAL;
 
 	return 0;
@@ -2241,8 +2387,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
 		goto out;
 	}
 
-	/* Older models require a different 6 byte tag */
-	if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+	/* Older models require different 6 byte tags */
+	if (is5325(dev) || is5365(dev)) {
+		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+		goto out;
+	} else if (is63xx(dev)) {
		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
		goto out;
	}
@@ -2830,6 +2979,9 @@ static int b53_switch_init(struct b53_device *dev)
 		}
 	}
 
+	if (is5325e(dev))
+		dev->num_arl_buckets = 512;
+
 	dev->num_ports = fls(dev->enabled_ports);
 
 	dev->ds->num_ports = min_t(unsigned int, dev->num_ports,
 				   DSA_MAX_PORTS);
@@ -2931,10 +3083,24 @@ int b53_switch_detect(struct b53_device *dev)
 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
 		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
 
-		if (tmp == 0xf)
+		if (tmp == 0xf) {
+			u32 phy_id;
+			int val;
+
 			dev->chip_id = BCM5325_DEVICE_ID;
-		else
+
+			val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+			phy_id = (val & 0xffff) << 16;
+			val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+			phy_id |= (val & 0xfff0);
+
+			if (phy_id == 0x00406330)
+				dev->variant_id = B53_VARIANT_5325M;
+			else if (phy_id == 0x0143bc30)
+				dev->variant_id = B53_VARIANT_5325E;
+		} else {
 			dev->chip_id = BCM5365_DEVICE_ID;
+		}
 		break;
 	case BCM5389_DEVICE_ID:
 	case BCM5395_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a5ef7071ba07..b1b9e8882ba4 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -84,6 +84,12 @@ enum {
 	BCM53134_DEVICE_ID = 0x5075,
 };
 
+enum b53_variant_id {
+	B53_VARIANT_NONE = 0,
+	B53_VARIANT_5325E,
+	B53_VARIANT_5325M,
+};
+
 struct b53_pcs {
 	struct phylink_pcs pcs;
 	struct b53_device *dev;
@@ -118,6 +124,7 @@ struct b53_device {
 
 	/* chip specific data */
 	u32 chip_id;
+	enum b53_variant_id variant_id;
 	u8 core_rev;
 	u8 vta_regs[3];
 	u8 duplex_reg;
@@ -165,6 +172,18 @@ static inline int is5325(struct b53_device *dev)
 	return dev->chip_id == BCM5325_DEVICE_ID;
 }
 
+static inline int is5325e(struct b53_device *dev)
+{
+	return is5325(dev) &&
+		dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+	return is5325(dev) &&
+		dev->variant_id == B53_VARIANT_5325M;
+}
+
 static inline int is5365(struct b53_device *dev)
 {
 #ifdef CONFIG_BCM47XX
@@ -298,6 +317,19 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
 	ent->vid = mac_vid >> ARLTBL_VID_S;
 }
 
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+				       u64 mac_vid)
+{
+	memset(ent, 0, sizeof(*ent));
+	ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) &
+		    ARLTBL_DATA_PORT_ID_MASK_25;
+	ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+	ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+	ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+	u64_to_ether_addr(mac_vid, ent->mac);
+	ent->vid = mac_vid >> ARLTBL_VID_S_65;
+}
+
 static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
 				      const struct b53_arl_entry *ent)
 {
@@ -312,6 +344,22 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
 		*fwd_entry |= ARLTBL_AGE;
 }
 
+static inline void b53_arl_from_entry_25(u64 *mac_vid,
+					 const struct b53_arl_entry *ent)
+{
+	*mac_vid = ether_addr_to_u64(ent->mac);
+	*mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) <<
+		    ARLTBL_DATA_PORT_ID_S_25;
+	*mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) <<
+		    ARLTBL_VID_S_65;
+	if (ent->is_valid)
+		*mac_vid |= ARLTBL_VALID_25;
+	if (ent->is_static)
+		*mac_vid |= ARLTBL_STATIC_25;
+	if (ent->is_age)
+		*mac_vid |= ARLTBL_AGE_25;
+}
+
 #ifdef CONFIG_BCM47XX
 
 #include <linux/bcm47xx_nvram.h>
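b53_arl_from_entry_25() packs the whole BCM5325/5365 ARL entry into a single 64-bit word: MAC in the low 48 bits, port at bit 48, VID at bit 53, and AGE/STATIC/VALID in bits 61 to 63. A standalone round-trip check of that layout; the field positions mirror the ARLTBL_*_25/_65 defines above, and the 0xff VID mask is an assumption rather than a value shown in this hunk:

#include <assert.h>
#include <stdint.h>

#define ARLTBL_DATA_PORT_ID_S_25	48
#define ARLTBL_DATA_PORT_ID_MASK_25	0xfULL
#define ARLTBL_VID_S_65			53
#define ARLTBL_VID_MASK_25		0xffULL	/* assumption: 8-bit VID field */
#define ARLTBL_AGE_25			(1ULL << 61)
#define ARLTBL_STATIC_25		(1ULL << 62)
#define ARLTBL_VALID_25			(1ULL << 63)

int main(void)
{
	uint64_t mac = 0x001122334455ULL;	/* fits in bits 47:0 */
	uint64_t mac_vid = mac;

	/* pack, as b53_arl_from_entry_25() does */
	mac_vid |= (5ULL & ARLTBL_DATA_PORT_ID_MASK_25) << ARLTBL_DATA_PORT_ID_S_25;
	mac_vid |= (42ULL & ARLTBL_VID_MASK_25) << ARLTBL_VID_S_65;
	mac_vid |= ARLTBL_VALID_25 | ARLTBL_STATIC_25;

	/* unpack, as b53_arl_to_entry_25() does */
	assert(((mac_vid >> ARLTBL_DATA_PORT_ID_S_25) & ARLTBL_DATA_PORT_ID_MASK_25) == 5);
	assert(((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) == 42);
	assert(mac_vid & ARLTBL_VALID_25);
	assert(!(mac_vid & ARLTBL_AGE_25));
	return 0;
}

The switch to BIT_ULL(61..63) in b53_regs.h matters for exactly this reason: plain BIT() would overflow a 32-bit int for those positions.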
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 1fbc5a204bc7..309fe0e46dad 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
 #define B53_ARLIO_PAGE			0x05 /* ARL Access */
 #define B53_FRAMEBUF_PAGE		0x06 /* Management frame access */
 #define B53_MEM_ACCESS_PAGE		0x08 /* Memory access */
+#define B53_IEEE_PAGE			0x0a /* IEEE 802.1X */
 
 /* PHY Registers */
 #define B53_PORT_MII_PAGE(i)		(0x10 + (i)) /* Port i MII Registers */
@@ -95,17 +96,22 @@
 #define  PORT_OVERRIDE_SPEED_10M	(0 << PORT_OVERRIDE_SPEED_S)
 #define  PORT_OVERRIDE_SPEED_100M	(1 << PORT_OVERRIDE_SPEED_S)
 #define  PORT_OVERRIDE_SPEED_1000M	(2 << PORT_OVERRIDE_SPEED_S)
+#define  PORT_OVERRIDE_LP_FLOW_25	BIT(3) /* BCM5325 only */
 #define  PORT_OVERRIDE_RV_MII_25	BIT(4) /* BCM5325 only */
 #define  PORT_OVERRIDE_RX_FLOW		BIT(4)
 #define  PORT_OVERRIDE_TX_FLOW		BIT(5)
 #define  PORT_OVERRIDE_SPEED_2000M	BIT(6) /* BCM5301X only, requires setting 1000M */
 #define  PORT_OVERRIDE_EN		BIT(7) /* Use the register contents */
 
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
 #define B53_PD_MODE_CTRL_25		0x0f
+#define  PD_MODE_PORT_MASK		0x1f
+/* Bit 0 also powers down the switch. */
+#define  PD_MODE_POWER_DOWN_PORT(i)	BIT(i)
 
 /* IP Multicast control (8 bit) */
 #define B53_IP_MULTICAST_CTRL		0x21
+#define  B53_IP_MCAST_25		BIT(0)
 #define  B53_IPMC_FWD_EN		BIT(1)
 #define  B53_UC_FWD_EN			BIT(6)
 #define  B53_MC_FWD_EN			BIT(7)
@@ -324,9 +330,10 @@
 #define   ARLTBL_VID_MASK		0xfff
 #define   ARLTBL_DATA_PORT_ID_S_25	48
 #define   ARLTBL_DATA_PORT_ID_MASK_25	0xf
-#define   ARLTBL_AGE_25			BIT(61)
-#define   ARLTBL_STATIC_25		BIT(62)
-#define   ARLTBL_VALID_25		BIT(63)
+#define   ARLTBL_VID_S_65		53
+#define   ARLTBL_AGE_25			BIT_ULL(61)
+#define   ARLTBL_STATIC_25		BIT_ULL(62)
+#define   ARLTBL_VALID_25		BIT_ULL(63)
 
 /* ARL Table Data Entry N Registers (32 bit) */
 #define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x18)
@@ -366,6 +373,18 @@
 #define B53_ARL_SRCH_RSTL(x)		(B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
 
 /*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF		0x94
+#define  B53_IEEE_MCAST_DROP_EN		BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF		0x96
+#define  B53_IEEE_UCAST_DROP_EN		BIT(11)
+
+/*************************************************************************
  * Port VLAN Registers
  *************************************************************************/
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c142c17b3f6..6e1daf0018bc 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2786,8 +2786,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
 	kirq->dev = dev;
 	kirq->masked = ~0;
 
-	kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node),
-						kirq->nirqs, 0,
+	kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
 						&ksz_irq_domain_ops, kirq);
 	if (!kirq->domain)
 		return -ENOMEM;
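Several drivers in this series (ksz, mv88e6xxx, ar9331, ksz_ptp) make the same conversion: of_fwnode_handle(dev->of_node) becomes dev_fwnode(dev), which resolves the device's fwnode whether it came from DT or ACPI. A kernel-only sketch of the resulting call shape; my_irq_domain_ops and the wrapper are hypothetical:

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *my_create_domain(struct device *dev, unsigned int nirqs,
					   const struct irq_domain_ops *ops, void *priv)
{
	/* dev_fwnode() works for both DT and ACPI firmware nodes, so the
	 * caller no longer needs to reach into dev->of_node directly.
	 */
	return irq_domain_create_simple(dev_fwnode(dev), nirqs, 0, ops, priv);
}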
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 8ab664e85f13..35fc21b1ee48 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -1130,8 +1130,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
 
 	init_completion(&port->tstamp_msg_comp);
 
-	ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node),
-						  ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq);
+	ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+						  &ksz_ptp_irq_domain_ops, ptpirq);
 	if (!ptpirq->domain)
 		return -ENOMEM;
 
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index df213c37b4fe..e5bed4237ff4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2112,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
 	return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
 }
 
-static void
+static int
 mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
 {
 	struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -2122,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
 		mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
 	else
 		mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+	return 0;
 }
 
 static int
@@ -2185,7 +2187,7 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
 	gc->direction_input = mt7530_gpio_direction_input;
 	gc->direction_output = mt7530_gpio_direction_output;
 	gc->get = mt7530_gpio_get;
-	gc->set = mt7530_gpio_set;
+	gc->set_rv = mt7530_gpio_set;
 	gc->base = -1;
 	gc->ngpio = 15;
 	gc->can_sleep = true;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index aaf97c1e3167..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,10 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
 	if (err)
 		return err;
 
-	chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node),
-						       16, 0,
-						       &mv88e6xxx_g2_irq_domain_ops,
-						       chip);
+	chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+						       &mv88e6xxx_g2_irq_domain_ops, chip);
 	if (!chip->g2_irq.domain)
 		return -ENOMEM;
 
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 79a29676ca6f..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
 		return ret;
 	}
 
-	priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1,
-						   &ar9331_sw_irqdomain_ops, priv);
+	priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
						   priv);
 	if (!priv->irqdomain) {
 		dev_err(dev, "failed to create IRQ domain\n");
 		return -EINVAL;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index f18aa321053d..4f9687ab3b2b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2258,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
 	return !!(val & BIT(offset));
 }
 
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
-			     int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			    int val)
 {
 	struct vsc73xx *vsc = gpiochip_get_data(chip);
 	u32 tmp = val ? BIT(offset) : 0;
 
-	vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
-			    VSC73XX_GPIO, BIT(offset), tmp);
+	return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+				   VSC73XX_GPIO, BIT(offset), tmp);
 }
 
 static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
@@ -2317,7 +2317,7 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
 	vsc->gc.parent = vsc->dev;
 	vsc->gc.base = -1;
 	vsc->gc.get = vsc73xx_gpio_get;
-	vsc->gc.set = vsc73xx_gpio_set;
+	vsc->gc.set_rv = vsc73xx_gpio_set;
 	vsc->gc.direction_input = vsc73xx_gpio_direction_input;
 	vsc->gc.direction_output = vsc73xx_gpio_direction_output;
 	vsc->gc.get_direction = vsc73xx_gpio_get_direction;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index a7ec609d64de..06dea3a13e77 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1065,23 +1065,18 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
 
 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+	int size, index, num_desc = HW_DSCP_NUM;
 	struct airoha_eth *eth = qdma->eth;
 	int id = qdma - &eth->qdma[0];
+	u32 status, buf_size;
 	dma_addr_t dma_addr;
 	const char *name;
-	int size, index;
-	u32 status;
-
-	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-		return -ENOMEM;
-
-	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
 
 	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
 	if (!name)
 		return -ENOMEM;
 
+	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
 	index = of_property_match_string(eth->dev->of_node,
 					 "memory-region-names", name);
 	if (index >= 0) {
@@ -1099,8 +1094,12 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 		rmem = of_reserved_mem_lookup(np);
 		of_node_put(np);
 		dma_addr = rmem->base;
+		/* Compute the number of hw descriptors according to the
+		 * reserved memory size and the payload buffer size
+		 */
+		num_desc = div_u64(rmem->size, buf_size);
 	} else {
-		size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+		size = buf_size * num_desc;
 		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
 					 GFP_KERNEL))
 			return -ENOMEM;
@@ -1108,15 +1107,21 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 
 	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
 
+	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+		return -ENOMEM;
+
+	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+	/* QDMA0: 2KB. QDMA1: 1KB */
 	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
 			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
 	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
 			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
 	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
 			HW_FWD_DESC_NUM_MASK,
-			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
 
 	return read_poll_timeout(airoha_qdma_rr, status,
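The hfwd rework sizes the descriptor ring from the buffer pool instead of hardcoding HW_DSCP_NUM: QDMA1 halves the per-packet buffer, and with a reserved memory region the descriptor count becomes region size divided by buffer size. A userspace model of that arithmetic; the 2 KiB packet size matches the "QDMA0: 2KB" comment, while HW_DSCP_NUM and the region size are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define AIROHA_MAX_PACKET_SIZE	2048u	/* assumption: 2 KiB, per the QDMA0 comment */
#define HW_DSCP_NUM		1024u	/* hypothetical default descriptor count */

int main(void)
{
	uint64_t rmem_size = 1 << 20;	/* example: 1 MiB reserved region */

	for (int id = 0; id < 2; id++) {
		uint32_t buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
		uint64_t num_desc = rmem_size / buf_size;	/* div_u64() in the patch */

		printf("qdma%d: buf_size=%u num_desc=%llu (fallback default %u)\n",
		       id, buf_size, (unsigned long long)num_desc, HW_DSCP_NUM);
	}
	return 0;
}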
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 9067d2fc7706..c354d536bc66 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
 	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
 	      AIROHA_FOE_IB1_BIND_TTL;
 	hwe->ib1 = val;
 
@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
 		hwe->ipv6.data = qdata;
 		hwe->ipv6.ib2 = val;
 		l2 = &hwe->ipv6.l2;
+		l2->etype = ETH_P_IPV6;
 	} else {
 		hwe->ipv4.data = qdata;
 		hwe->ipv4.ib2 = val;
 		l2 = &hwe->ipv4.l2.common;
+		l2->etype = ETH_P_IP;
 	}
 
 	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
 	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
 	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+		struct airoha_foe_mac_info *mac_info;
+
 		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
 		hwe->ipv4.l2.src_mac_lo =
 			get_unaligned_be16(data->eth.h_source + 4);
+
+		mac_info = (struct airoha_foe_mac_info *)l2;
+		mac_info->pppoe_id = data->pppoe.sid;
 	} else {
-		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+				 FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+					    data->pppoe.sid);
 	}
 
 	if (data->vlan.num) {
-		l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
 		l2->vlan1 = data->vlan.hdr[0].id;
 		if (data->vlan.num == 2)
 			l2->vlan2 = data->vlan.hdr[1].id;
-	} else if (dsa_port >= 0) {
-		l2->etype = BIT(15) | BIT(dsa_port);
-	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
-		l2->etype = ETH_P_IPV6;
-	} else {
-		l2->etype = ETH_P_IP;
+	}
+
+	if (dsa_port >= 0) {
+		l2->etype = BIT(dsa_port);
+		l2->etype |= !data->vlan.num ? BIT(15) : 0;
+	} else if (data->pppoe.num) {
+		l2->etype = ETH_P_PPP_SES;
 	}
 
 	return 0;
@@ -809,8 +819,10 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
 		int idle;
 
 		hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
-		ib1 = READ_ONCE(hwe->ib1);
+		if (!hwe)
+			continue;
 
+		ib1 = READ_ONCE(hwe->ib1);
 		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
 		if (state != AIROHA_FOE_STATE_BIND) {
 			iter->hash = 0xffff;
@@ -957,6 +969,11 @@ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
 		case FLOW_ACTION_VLAN_POP:
 			break;
 		case FLOW_ACTION_PPPOE_PUSH:
+			if (data.pppoe.num == 1 || data.vlan.num == 2)
+				return -EOPNOTSUPP;
+
+			data.pppoe.sid = act->pppoe.sid;
+			data.pppoe.num++;
 			break;
 		default:
 			return -EOPNOTSUPP;
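The rewritten etype selection in airoha_ppe_foe_entry_prepare() sets an IP default when the L2 block is chosen, then lets DSA and PPPoE override it. A simplified userspace model of that precedence (it ignores the VLAN/PPPoE combinations the offload path rejects; ethertype constants are the standard linux/if_ether.h values):

#include <stdio.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_PPP_SES	0x8864

static int pick_etype(int ipv6, int dsa_port, int vlan_num, int pppoe_num)
{
	int etype = ipv6 ? ETH_P_IPV6 : ETH_P_IP;	/* default set when l2 is filled */

	if (dsa_port >= 0)
		etype = (1 << dsa_port) | (!vlan_num ? (1 << 15) : 0);
	else if (pppoe_num)
		etype = ETH_P_PPP_SES;
	return etype;
}

int main(void)
{
	printf("plain v4:  0x%04x\n", pick_etype(0, -1, 0, 0));
	printf("pppoe:     0x%04x\n", pick_etype(0, -1, 0, 1));
	printf("dsa port3: 0x%04x\n", pick_etype(0, 3, 0, 0));
	printf("dsa+vlan:  0x%04x\n", pick_etype(0, 3, 1, 0));
	return 0;
}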
+ */ + u32 block_timeout_usec; + + /* Shared PHC physical address (ena_admin_phc_resp), + * used only for SET command. + */ + struct ena_common_mem_addr output_address; + + /* Shared PHC Size (ena_admin_phc_resp), + * used only for SET command. + */ + u32 output_length; +}; + struct ena_admin_get_feat_resp { struct ena_admin_acq_common_desc acq_common_desc; @@ -1052,6 +1100,8 @@ struct ena_admin_get_feat_resp { struct ena_admin_feature_intr_moder_desc intr_moderation; struct ena_admin_ena_hw_hints hw_hints; + + struct ena_admin_feature_phc_desc phc; } u; }; @@ -1085,6 +1135,9 @@ struct ena_admin_set_feat_cmd { /* LLQ configuration */ struct ena_admin_feature_llq_desc llq; + + /* PHC configuration */ + struct ena_admin_feature_phc_desc phc; } u; }; @@ -1162,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp { u32 reg_val; }; +struct ena_admin_phc_resp { + /* Request Id, received from DB register */ + u16 req_id; + + u8 reserved1[6]; + + /* PHC timestamp (nsec) */ + u64 timestamp; + + u8 reserved2[12]; + + /* Bit field of enum ena_admin_phc_error_flags */ + u32 error_flags; + + u8 reserved3[32]; +}; + /* aq_common_desc */ #define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) #define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) @@ -1260,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4) #define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6 #define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6) +#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8) /* aenq_common_desc */ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 66445617fbfb..e67b592e5697 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -41,6 +41,12 @@ #define ENA_MAX_ADMIN_POLL_US 5000 +/* PHC definitions */ +#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10 +#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000 +#define ENA_PHC_REQ_ID_OFFSET 0xDEAD +#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP) + /*****************************************************************************/ /*****************************************************************************/ /*****************************************************************************/ @@ -1641,6 +1647,267 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) ena_dev->admin_queue.polling = polling; } +bool ena_com_phc_supported(struct ena_com_dev *ena_dev) +{ + return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG); +} + +int ena_com_phc_init(struct ena_com_dev *ena_dev) +{ + struct ena_com_phc_info *phc = &ena_dev->phc; + + memset(phc, 0x0, sizeof(*phc)); + + /* Allocate shared mem used PHC timestamp retrieved from device */ + phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev, + sizeof(*phc->virt_addr), + &phc->phys_addr, + GFP_KERNEL); + if (unlikely(!phc->virt_addr)) + return -ENOMEM; + + spin_lock_init(&phc->lock); + + phc->virt_addr->req_id = 0; + phc->virt_addr->timestamp = 0; + + return 0; +} + +int ena_com_phc_config(struct ena_com_dev *ena_dev) +{ + struct ena_com_phc_info *phc = &ena_dev->phc; + struct ena_admin_get_feat_resp get_feat_resp; + struct ena_admin_set_feat_resp set_feat_resp; + struct ena_admin_set_feat_cmd set_feat_cmd; + int ret = 0; + + /* Get device PHC default configuration */ + ret = ena_com_get_feature(ena_dev, + 
&get_feat_resp, + ENA_ADMIN_PHC_CONFIG, + 0); + if (unlikely(ret)) { + netdev_err(ena_dev->net_device, + "Failed to get PHC feature configuration, error: %d\n", + ret); + return ret; + } + + /* Supporting only readless PHC retrieval */ + if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) { + netdev_err(ena_dev->net_device, + "Unsupported PHC type, error: %d\n", + -EOPNOTSUPP); + return -EOPNOTSUPP; + } + + /* Update PHC doorbell offset according to device value, + * used to write req_id to PHC bar + */ + phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset; + + /* Update PHC expire timeout according to device + * or default driver value + */ + phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ? + get_feat_resp.u.phc.expire_timeout_usec : + ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC; + + /* Update PHC block timeout according to device + * or default driver value + */ + phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ? + get_feat_resp.u.phc.block_timeout_usec : + ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC; + + /* Sanity check - expire timeout must not exceed block timeout */ + if (phc->expire_timeout_usec > phc->block_timeout_usec) + phc->expire_timeout_usec = phc->block_timeout_usec; + + /* Prepare PHC feature command */ + memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd)); + set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG; + set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr); + ret = ena_com_mem_addr_set(ena_dev, + &set_feat_cmd.u.phc.output_address, + phc->phys_addr); + if (unlikely(ret)) { + netdev_err(ena_dev->net_device, + "Failed setting PHC output address, error: %d\n", + ret); + return ret; + } + + /* Send PHC feature command to the device */ + ret = ena_com_execute_admin_command(&ena_dev->admin_queue, + (struct ena_admin_aq_entry *)&set_feat_cmd, + sizeof(set_feat_cmd), + (struct ena_admin_acq_entry *)&set_feat_resp, + sizeof(set_feat_resp)); + + if (unlikely(ret)) { + netdev_err(ena_dev->net_device, + "Failed to enable PHC, error: %d\n", + ret); + return ret; + } + + phc->active = true; + netdev_dbg(ena_dev->net_device, "PHC is active in the device\n"); + + return ret; +} + +void ena_com_phc_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_phc_info *phc = &ena_dev->phc; + unsigned long flags = 0; + + /* In case PHC is not supported by the device, silently exiting */ + if (!phc->virt_addr) + return; + + spin_lock_irqsave(&phc->lock, flags); + phc->active = false; + spin_unlock_irqrestore(&phc->lock, flags); + + dma_free_coherent(ena_dev->dmadev, + sizeof(*phc->virt_addr), + phc->virt_addr, + phc->phys_addr); + phc->virt_addr = NULL; +} + +int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp) +{ + volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr; + const ktime_t zero_system_time = ktime_set(0, 0); + struct ena_com_phc_info *phc = &ena_dev->phc; + ktime_t expire_time; + ktime_t block_time; + unsigned long flags = 0; + int ret = 0; + + if (!phc->active) { + netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n"); + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&phc->lock, flags); + + /* Check if PHC is in blocked state */ + if (unlikely(ktime_compare(phc->system_time, zero_system_time))) { + /* Check if blocking time expired */ + block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec); + if (!ktime_after(ktime_get(), block_time)) { + /* PHC is still in blocked state, skip PHC request 
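
The blocked-state test that follows is compact ktime arithmetic; a minimal sketch of the same predicate as a standalone helper (hypothetical name, assuming the zero-means-active convention used by this driver):

static bool ena_phc_in_blocked_window(ktime_t system_time, u32 block_us)
{
	/* a zero system_time means the PHC is in active (non-blocked) state */
	if (!ktime_compare(system_time, ktime_set(0, 0)))
		return false;

	/* blocked until block_us has elapsed since the last failed request */
	return !ktime_after(ktime_get(), ktime_add_us(system_time, block_us));
}
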
*/ + phc->stats.phc_skp++; + ret = -EBUSY; + goto skip; + } + + /* PHC is in active state, update statistics according + * to req_id and error_flags + */ + if (READ_ONCE(resp->req_id) != phc->req_id) { + /* Device didn't update req_id during blocking time, + * this indicates on a device error + */ + netdev_err(ena_dev->net_device, + "PHC get time request 0x%x failed (device error)\n", + phc->req_id); + phc->stats.phc_err_dv++; + } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) { + /* Device updated req_id during blocking time but got + * a PHC error, this occurs if device: + * - exceeded the get time request limit + * - received an invalid timestamp + */ + netdev_err(ena_dev->net_device, + "PHC get time request 0x%x failed (error 0x%x)\n", + phc->req_id, + resp->error_flags); + phc->stats.phc_err_ts += !!(resp->error_flags & + ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP); + } else { + /* Device updated req_id during blocking time + * with valid timestamp + */ + phc->stats.phc_exp++; + } + } + + /* Setting relative timeouts */ + phc->system_time = ktime_get(); + block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec); + expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec); + + /* We expect the device to return this req_id once + * the new PHC timestamp is updated + */ + phc->req_id++; + + /* Initialize PHC shared memory with different req_id value + * to be able to identify once the device changes it to req_id + */ + resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET; + + /* Writing req_id to PHC bar */ + writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset); + + /* Stalling until the device updates req_id */ + while (1) { + if (unlikely(ktime_after(ktime_get(), expire_time))) { + /* Gave up waiting for updated req_id, PHC enters into + * blocked state until passing blocking time, + * during this time any get PHC timestamp will fail with + * device busy error + */ + ret = -EBUSY; + break; + } + + /* Check if req_id was updated by the device */ + if (READ_ONCE(resp->req_id) != phc->req_id) { + /* req_id was not updated by the device yet, + * check again on next loop + */ + continue; + } + + /* req_id was updated by the device which indicates that + * PHC timestamp and error_flags are updated too, + * checking errors before retrieving timestamp + */ + if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) { + /* Retrieved invalid PHC timestamp, PHC enters into + * blocked state until passing blocking time, + * during this time any get PHC timestamp requests + * will fail with device busy error + */ + ret = -EBUSY; + break; + } + + /* PHC timestamp value is returned to the caller */ + *timestamp = resp->timestamp; + + /* Update statistic on valid PHC timestamp retrieval */ + phc->stats.phc_cnt++; + + /* This indicates PHC state is active */ + phc->system_time = zero_system_time; + break; + } + +skip: + spin_unlock_irqrestore(&phc->lock, flags); + + return ret; +} + int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 9414e93d107b..64df2c48c9a6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -210,6 +210,14 @@ struct ena_com_stats_admin { u64 no_completion; }; +struct ena_com_stats_phc { + u64 phc_cnt; + u64 phc_exp; + u64 phc_skp; + u64 phc_err_dv; + u64 phc_err_ts; +}; + struct ena_com_admin_queue { void *q_dmadev; struct 
ena_com_dev *ena_dev; @@ -258,6 +266,47 @@ struct ena_com_mmio_read { spinlock_t lock; }; +/* PTP hardware clock (PHC) MMIO read data info */ +struct ena_com_phc_info { + /* Internal PHC statistics */ + struct ena_com_stats_phc stats; + + /* PHC shared memory - virtual address */ + struct ena_admin_phc_resp *virt_addr; + + /* System time of last PHC request */ + ktime_t system_time; + + /* Spin lock to ensure a single outstanding PHC read */ + spinlock_t lock; + + /* PHC doorbell address as an offset to PCIe MMIO REG BAR */ + u32 doorbell_offset; + + /* Shared memory read expire timeout (usec) + * Max time for valid PHC retrieval, passing this threshold will fail + * the get time request and block new PHC requests for block_timeout_usec + * in order to prevent floods on busy device + */ + u32 expire_timeout_usec; + + /* Shared memory read abort timeout (usec) + * PHC requests block period, blocking starts once PHC request expired + * in order to prevent floods on busy device, + * any PHC requests during block period will be skipped + */ + u32 block_timeout_usec; + + /* PHC shared memory - physical address */ + dma_addr_t phys_addr; + + /* Request id sent to the device */ + u16 req_id; + + /* True if PHC is active in the device */ + bool active; +}; + struct ena_rss { /* Indirect table */ u16 *host_rss_ind_tbl; @@ -317,6 +366,7 @@ struct ena_com_dev { u32 ena_min_poll_delay_us; struct ena_com_mmio_read mmio_read; + struct ena_com_phc_info phc; struct ena_rss rss; u32 supported_features; @@ -382,6 +432,40 @@ struct ena_aenq_handlers { */ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); +/* ena_com_phc_init - Allocate and initialize PHC feature + * @ena_dev: ENA communication layer struct + * @note: This method assumes PHC is supported by the device + * @return - 0 on success, negative value on failure + */ +int ena_com_phc_init(struct ena_com_dev *ena_dev); + +/* ena_com_phc_supported - Return if PHC feature is supported by the device + * @ena_dev: ENA communication layer struct + * @note: This method must be called after getting supported features + * @return - supported or not + */ +bool ena_com_phc_supported(struct ena_com_dev *ena_dev); + +/* ena_com_phc_config - Configure PHC feature + * @ena_dev: ENA communication layer struct + * Configure PHC feature in driver and device + * @note: This method assumes PHC is supported by the device + * @return - 0 on success, negative value on failure + */ +int ena_com_phc_config(struct ena_com_dev *ena_dev); + +/* ena_com_phc_destroy - Destroy PHC feature + * @ena_dev: ENA communication layer struct + */ +void ena_com_phc_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_phc_get_timestamp - Retrieve PHC timestamp + * @ena_dev: ENA communication layer struct + * @timestamp: Retrieved PHC timestamp + * @return - 0 on success, negative value on failure + */ +int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp); + /* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism * @ena_dev: ENA communication layer struct * @readless_supported: readless mode (enable/disable) diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c new file mode 100644 index 000000000000..46ed80986724 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) Amazon.com, Inc. or its affiliates. + * All rights reserved. 
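
Taken together, the kernel-doc above implies a strict call order. A minimal sketch of a well-formed sequence (error handling trimmed; mirrors what ena_phc_init()/ena_phc_destroy() do later in this diff):

if (ena_com_phc_supported(ena_dev) &&	/* only after features are fetched */
    !ena_com_phc_init(ena_dev) &&	/* allocate the shared response page */
    !ena_com_phc_config(ena_dev)) {	/* negotiate timeouts, enable in device */
	u64 ts;

	if (!ena_com_phc_get_timestamp(ena_dev, &ts))
		pr_info("PHC timestamp: %llu ns\n", ts);
}
ena_com_phc_destroy(ena_dev);		/* no-op if init never succeeded */
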
+ */ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/seq_file.h> +#include <linux/pci.h> +#include "ena_debugfs.h" +#include "ena_phc.h" + +static int phc_stats_show(struct seq_file *file, void *priv) +{ + struct ena_adapter *adapter = file->private; + + if (!ena_phc_is_active(adapter)) + return 0; + + seq_printf(file, + "phc_cnt: %llu\n", + adapter->ena_dev->phc.stats.phc_cnt); + seq_printf(file, + "phc_exp: %llu\n", + adapter->ena_dev->phc.stats.phc_exp); + seq_printf(file, + "phc_skp: %llu\n", + adapter->ena_dev->phc.stats.phc_skp); + seq_printf(file, + "phc_err_dv: %llu\n", + adapter->ena_dev->phc.stats.phc_err_dv); + seq_printf(file, + "phc_err_ts: %llu\n", + adapter->ena_dev->phc.stats.phc_err_ts); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(phc_stats); + +void ena_debugfs_init(struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + adapter->debugfs_base = + debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL); + + debugfs_create_file("phc_stats", + 0400, + adapter->debugfs_base, + adapter, + &phc_stats_fops); +} + +void ena_debugfs_terminate(struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + debugfs_remove_recursive(adapter->debugfs_base); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h new file mode 100644 index 000000000000..dc61dd998867 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef __ENA_DEBUGFS_H__ +#define __ENA_DEBUGFS_H__ + +#include <linux/debugfs.h> +#include <linux/netdevice.h> +#include "ena_netdev.h" + +#ifdef CONFIG_DEBUG_FS + +void ena_debugfs_init(struct net_device *dev); + +void ena_debugfs_terminate(struct net_device *dev); + +#else /* CONFIG_DEBUG_FS */ + +static inline void ena_debugfs_init(struct net_device *dev) {} + +static inline void ena_debugfs_terminate(struct net_device *dev) {} + +#endif /* CONFIG_DEBUG_FS */ + +#endif /* __ENA_DEBUGFS_H__ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c new file mode 100644 index 000000000000..ac81c24016dd --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) Amazon.com, Inc. or its affiliates. + * All rights reserved. 
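
The phc_stats file above leans on DEFINE_SHOW_ATTRIBUTE() for all of its file plumbing. For reference, the macro (from <linux/seq_file.h>) expands to roughly this boilerplate, wiring phc_stats_show() into a read-only seq_file:

static int phc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, phc_stats_show, inode->i_private);
}

static const struct file_operations phc_stats_fops = {
	.owner = THIS_MODULE,
	.open = phc_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
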
+ */ + +#include "linux/pci.h" +#include "ena_devlink.h" +#include "ena_phc.h" + +static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + + if (!val.vbool) + return 0; + + if (!ena_com_phc_supported(adapter->ena_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC"); + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct devlink_param ena_devlink_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_PHC, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, + NULL, + ena_devlink_enable_phc_validate), +}; + +void ena_devlink_params_get(struct devlink *devlink) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + union devlink_param_value val; + int err; + + err = devl_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC, + &val); + if (err) { + netdev_err(adapter->netdev, "Failed to query PHC param\n"); + return; + } + + ena_phc_enable(adapter, val.vbool); +} + +void ena_devlink_disable_phc_param(struct devlink *devlink) +{ + union devlink_param_value value; + + value.vbool = false; + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC, + value); +} + +static void ena_devlink_port_register(struct devlink *devlink) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + struct devlink_port_attrs attrs = {}; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + devlink_port_attrs_set(&adapter->devlink_port, &attrs); + devl_port_register(devlink, &adapter->devlink_port, 0); +} + +static void ena_devlink_port_unregister(struct devlink *devlink) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + + devl_port_unregister(&adapter->devlink_port); +} + +static int ena_devlink_reload_down(struct devlink *devlink, + bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + + if (netns_change) { + NL_SET_ERR_MSG_MOD(extack, + "Namespace change is not supported"); + return -EOPNOTSUPP; + } + + ena_devlink_port_unregister(devlink); + + rtnl_lock(); + ena_destroy_device(adapter, false); + rtnl_unlock(); + + return 0; +} + +static int ena_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + int err = 0; + + rtnl_lock(); + /* Check that no other routine initialized the device (e.g. + * ena_fw_reset_device()). Also we're under devlink_mutex here, + * so devlink isn't freed under our feet. 
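
Because ENABLE_PHC is registered with cmode driverinit only, a value set from userspace does not reach the driver until the driver re-reads the parameter during (re)initialization, typically via "devlink dev param set pci/0000:00:06.0 name enable_phc value true cmode driverinit" followed by "devlink dev reload pci/0000:00:06.0" (the PCI address is a placeholder). A minimal sketch of the driver side of that handshake, mirroring ena_devlink_params_get() above:

union devlink_param_value val;

if (!devl_param_driverinit_value_get(devlink,
				     DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
				     &val))
	ena_phc_enable(adapter, val.vbool);	/* applied on this init only */
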
+ */ + if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) + err = ena_restore_device(adapter); + + rtnl_unlock(); + + ena_devlink_port_register(devlink); + + if (!err) + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); + + return err; +} + +static const struct devlink_ops ena_devlink_ops = { + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = ena_devlink_reload_down, + .reload_up = ena_devlink_reload_up, +}; + +static int ena_devlink_configure_params(struct devlink *devlink) +{ + struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink); + union devlink_param_value value; + int rc; + + rc = devlink_params_register(devlink, ena_devlink_params, + ARRAY_SIZE(ena_devlink_params)); + if (rc) { + netdev_err(adapter->netdev, "Failed to register devlink params\n"); + return rc; + } + + value.vbool = ena_phc_is_enabled(adapter); + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC, + value); + + return 0; +} + +struct devlink *ena_devlink_alloc(struct ena_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + struct devlink *devlink; + + devlink = devlink_alloc(&ena_devlink_ops, + sizeof(struct ena_adapter *), + dev); + if (!devlink) { + netdev_err(adapter->netdev, + "Failed to allocate devlink struct\n"); + return NULL; + } + + ENA_DEVLINK_PRIV(devlink) = adapter; + adapter->devlink = devlink; + + if (ena_devlink_configure_params(devlink)) + goto free_devlink; + + return devlink; + +free_devlink: + devlink_free(devlink); + return NULL; +} + +static void ena_devlink_configure_params_clean(struct devlink *devlink) +{ + devlink_params_unregister(devlink, ena_devlink_params, + ARRAY_SIZE(ena_devlink_params)); +} + +void ena_devlink_free(struct devlink *devlink) +{ + ena_devlink_configure_params_clean(devlink); + + devlink_free(devlink); +} + +void ena_devlink_register(struct devlink *devlink, struct device *dev) +{ + devl_lock(devlink); + ena_devlink_port_register(devlink); + devl_register(devlink); + devl_unlock(devlink); +} + +void ena_devlink_unregister(struct devlink *devlink) +{ + devl_lock(devlink); + ena_devlink_port_unregister(devlink); + devl_unregister(devlink); + devl_unlock(devlink); +} diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h new file mode 100644 index 000000000000..7a19ce4830d9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) Amazon.com, Inc. or its affiliates. + * All rights reserved. 
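
One subtlety worth calling out: ena_devlink_alloc() above sized the devlink private area as sizeof(struct ena_adapter *), i.e. the priv area holds a single pointer back to the adapter rather than embedding it. That is why the ENA_DEVLINK_PRIV() macro defined just below dereferences twice; a short sketch of both directions:

struct ena_adapter **priv = devlink_priv(devlink);

*priv = adapter;			/* what ena_devlink_alloc() stores */
adapter = ENA_DEVLINK_PRIV(devlink);	/* equivalent to: adapter = *priv */
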
+ */ +#ifndef DEVLINK_H +#define DEVLINK_H + +#include "ena_netdev.h" +#include <net/devlink.h> + +#define ENA_DEVLINK_PRIV(devlink) \ + (*(struct ena_adapter **)devlink_priv(devlink)) + +struct devlink *ena_devlink_alloc(struct ena_adapter *adapter); +void ena_devlink_free(struct devlink *devlink); +void ena_devlink_register(struct devlink *devlink, struct device *dev); +void ena_devlink_unregister(struct devlink *devlink); +void ena_devlink_params_get(struct devlink *devlink); +void ena_devlink_disable_phc_param(struct devlink *devlink); + +#endif /* DEVLINK_H */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index a3c934c3de71..a81d3a7a3bb9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -5,9 +5,11 @@ #include <linux/ethtool.h> #include <linux/pci.h> +#include <linux/net_tstamp.h> #include "ena_netdev.h" #include "ena_xdp.h" +#include "ena_phc.h" struct ena_stats { char name[ETH_GSTRING_LEN]; @@ -298,6 +300,18 @@ static void ena_get_ethtool_stats(struct net_device *netdev, ena_get_stats(adapter, data, true); } +static int ena_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; + + info->phc_index = ena_phc_get_index(adapter); + + return 0; +} + static int ena_get_sw_stats_count(struct ena_adapter *adapter) { return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) @@ -721,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields) return data; } -static int ena_get_rss_hash(struct ena_com_dev *ena_dev, - struct ethtool_rxnfc *cmd) +static int ena_get_rxfh_fields(struct net_device *netdev, + struct ethtool_rxfh_fields *cmd) { + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *ena_dev = adapter->ena_dev; enum ena_admin_flow_hash_proto proto; u16 hash_fields; int rc; @@ -772,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev, return 0; } -static int ena_set_rss_hash(struct ena_com_dev *ena_dev, - struct ethtool_rxnfc *cmd) +static int ena_set_rxfh_fields(struct net_device *netdev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) { + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *ena_dev = adapter->ena_dev; enum ena_admin_flow_hash_proto proto; u16 hash_fields; @@ -816,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev, return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields); } -static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) -{ - struct ena_adapter *adapter = netdev_priv(netdev); - int rc = 0; - - switch (info->cmd) { - case ETHTOOL_SRXFH: - rc = ena_set_rss_hash(adapter->ena_dev, info); - break; - case ETHTOOL_SRXCLSRLDEL: - case ETHTOOL_SRXCLSRLINS: - default: - netif_err(adapter, drv, netdev, - "Command parameter %d is not supported\n", info->cmd); - rc = -EOPNOTSUPP; - } - - return rc; -} - static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { @@ -847,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, info->data = adapter->num_io_queues; rc = 0; break; - case ETHTOOL_GRXFH: - rc = ena_get_rss_hash(adapter->ena_dev, info); - break; case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: @@ -1098,16 +1094,17 @@ static const struct ethtool_ops ena_ethtool_ops 
= { .get_strings = ena_get_ethtool_strings, .get_ethtool_stats = ena_get_ethtool_stats, .get_rxnfc = ena_get_rxnfc, - .set_rxnfc = ena_set_rxnfc, .get_rxfh_indir_size = ena_get_rxfh_indir_size, .get_rxfh_key_size = ena_get_rxfh_key_size, .get_rxfh = ena_get_rxfh, .set_rxfh = ena_set_rxfh, + .get_rxfh_fields = ena_get_rxfh_fields, + .set_rxfh_fields = ena_set_rxfh_fields, .get_channels = ena_get_channels, .set_channels = ena_set_channels, .get_tunable = ena_get_tunable, .set_tunable = ena_set_tunable, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = ena_get_ts_info, }; void ena_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 86fd08f375df..92d149d4f091 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -19,6 +19,12 @@ #include "ena_pci_id_tbl.h" #include "ena_xdp.h" +#include "ena_phc.h" + +#include "ena_devlink.h" + +#include "ena_debugfs.h" + MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); MODULE_DESCRIPTION(DEVICE_NAME); MODULE_LICENSE("GPL"); @@ -39,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl); static int ena_rss_init_default(struct ena_adapter *adapter); static void check_for_admin_com_state(struct ena_adapter *adapter); -static int ena_destroy_device(struct ena_adapter *adapter, bool graceful); -static int ena_restore_device(struct ena_adapter *adapter); static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { @@ -2743,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK | - ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK; + ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK | + ENA_ADMIN_HOST_INFO_PHC_MASK; rc = ena_com_set_host_attributes(ena_dev); if (rc) { @@ -3135,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, goto err_mmio_read_less; } + ena_devlink_params_get(adapter->devlink); + /* ENA admin level init */ rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (rc) { @@ -3188,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, if (unlikely(rc)) goto err_admin_init; + rc = ena_phc_init(adapter); + if (unlikely(rc && (rc != -EOPNOTSUPP))) + netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc); + return 0; err_admin_init: @@ -3233,7 +3244,7 @@ err_disable_msix: return rc; } -static int ena_destroy_device(struct ena_adapter *adapter, bool graceful) +int ena_destroy_device(struct ena_adapter *adapter, bool graceful) { struct net_device *netdev = adapter->netdev; struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -3271,6 +3282,8 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful) ena_com_admin_destroy(ena_dev); + ena_phc_destroy(adapter); + ena_com_mmio_reg_read_request_destroy(ena_dev); /* return reset reason to default value */ @@ -3282,7 +3295,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful) return rc; } -static int ena_restore_device(struct ena_adapter *adapter) +int ena_restore_device(struct ena_adapter *adapter) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -3344,6 +3357,7 @@ err_device_destroy: ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_dev_reset(ena_dev, 
ENA_REGS_RESET_DRIVER_INVALID_STATE); + ena_phc_destroy(adapter); ena_com_mmio_reg_read_request_destroy(ena_dev); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); @@ -3867,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ena_adapter *adapter; struct net_device *netdev; static int adapters_found; + struct devlink *devlink; u32 max_num_io_queues; bool wd_state; int bars, rc; @@ -3932,10 +3947,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, adapter); + rc = ena_phc_alloc(adapter); + if (rc) { + netdev_err(netdev, "ena_phc_alloc failed\n"); + goto err_netdev_destroy; + } + rc = ena_com_allocate_customer_metrics_buffer(ena_dev); if (rc) { netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n"); - goto err_netdev_destroy; + goto err_free_phc; } rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); @@ -3944,12 +3965,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_metrics_destroy; } + /* Need to do this before ena_device_init */ + devlink = ena_devlink_alloc(adapter); + if (!devlink) { + netdev_err(netdev, "ena_devlink_alloc failed\n"); + rc = -ENOMEM; + goto err_metrics_destroy; + } + rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "ENA device init failed\n"); if (rc == -ETIME) rc = -EPROBE_DEFER; - goto err_metrics_destroy; + goto ena_devlink_destroy; } /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. @@ -4033,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_rss; } + ena_debugfs_init(netdev); + INIT_WORK(&adapter->reset_task, ena_fw_reset_device); adapter->last_keep_alive_jiffies = jiffies; @@ -4054,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapters_found++; + /* From this point, the devlink device is visible to users. + * Perform the registration last to ensure that all the resources + * are available and that the netdevice is registered. + */ + ena_devlink_register(devlink, &pdev->dev); + return 0; err_rss: @@ -4070,8 +4107,12 @@ err_worker_destroy: err_device_destroy: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); +ena_devlink_destroy: + ena_devlink_free(devlink); err_metrics_destroy: ena_com_delete_customer_metrics_buffer(ena_dev); +err_free_phc: + ena_phc_free(adapter); err_netdev_destroy: free_netdev(netdev); err_free_region: @@ -4102,6 +4143,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) ena_dev = adapter->ena_dev; netdev = adapter->netdev; + ena_debugfs_terminate(netdev); + /* Make sure timer and reset routine won't be called after * freeing device resources. 
*/ @@ -4112,6 +4155,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; ena_destroy_device(adapter, true); + ena_phc_free(adapter); + + ena_devlink_unregister(adapter->devlink); + ena_devlink_free(adapter->devlink); + if (shutdown) { netif_device_detach(netdev); dev_close(netdev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 6e12ae3b12e5..006f9a3acea6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -16,6 +16,7 @@ #include <linux/skbuff.h> #include <net/xdp.h> #include <uapi/linux/bpf.h> +#include <net/devlink.h> #include "ena_com.h" #include "ena_eth_com.h" @@ -110,6 +111,8 @@ #define ENA_MMIO_DISABLE_REG_READ BIT(0) +struct ena_phc_info; + struct ena_irq { irq_handler_t handler; void *data; @@ -348,6 +351,8 @@ struct ena_adapter { char name[ENA_NAME_MAX_LEN]; + struct ena_phc_info *phc_info; + unsigned long flags; /* TX */ struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES] @@ -383,6 +388,13 @@ struct ena_adapter { struct bpf_prog *xdp_bpf_prog; u32 xdp_first_ring; u32 xdp_num_queues; + + struct devlink *devlink; + struct devlink_port devlink_port; +#ifdef CONFIG_DEBUG_FS + + struct dentry *debugfs_base; +#endif /* CONFIG_DEBUG_FS */ }; void ena_set_ethtool_ops(struct net_device *netdev); @@ -412,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter, set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } +int ena_destroy_device(struct ena_adapter *adapter, bool graceful); +int ena_restore_device(struct ena_adapter *adapter); int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, struct ena_tx_buffer *tx_info, bool is_xdp); diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c new file mode 100644 index 000000000000..7867e893fd15 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_phc.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved. 
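
Once ena_phc.c below registers the clock, it appears as /dev/ptp<N>, with N reported by ethtool -T. A minimal userspace sketch of reading it (the device path is an assumption; the fd-to-clockid encoding is the standard one used by the kernel's testptp tool):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* index from ethtool -T */
	struct timespec ts;

	if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
		return 1;

	printf("phc: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
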
+ */ + +#include <linux/pci.h> +#include "ena_netdev.h" +#include "ena_phc.h" +#include "ena_devlink.h" + +static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta) +{ + return -EOPNOTSUPP; +} + +static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm) +{ + return -EOPNOTSUPP; +} + +static int ena_phc_feature_enable(struct ptp_clock_info *clock_info, + struct ptp_clock_request *rq, + int on) +{ + return -EOPNOTSUPP; +} + +static int ena_phc_gettimex64(struct ptp_clock_info *clock_info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct ena_phc_info *phc_info = + container_of(clock_info, struct ena_phc_info, clock_info); + unsigned long flags; + u64 timestamp_nsec; + int rc; + + spin_lock_irqsave(&phc_info->lock, flags); + + ptp_read_system_prets(sts); + + rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev, + &timestamp_nsec); + + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&phc_info->lock, flags); + + *ts = ns_to_timespec64(timestamp_nsec); + + return rc; +} + +static int ena_phc_settime64(struct ptp_clock_info *clock_info, + const struct timespec64 *ts) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ena_ptp_clock_info = { + .owner = THIS_MODULE, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjtime = ena_phc_adjtime, + .adjfine = ena_phc_adjfine, + .gettimex64 = ena_phc_gettimex64, + .settime64 = ena_phc_settime64, + .enable = ena_phc_feature_enable, +}; + +/* Enable/Disable PHC by the kernel, takes effect on the next init flow */ +void ena_phc_enable(struct ena_adapter *adapter, bool enable) +{ + struct ena_phc_info *phc_info = adapter->phc_info; + + if (!phc_info) { + netdev_err(adapter->netdev, "phc_info is not allocated\n"); + return; + } + + phc_info->enabled = enable; +} + +/* Check if PHC is enabled by the kernel */ +bool ena_phc_is_enabled(struct ena_adapter *adapter) +{ + struct ena_phc_info *phc_info = adapter->phc_info; + + return (phc_info && phc_info->enabled); +} + +/* PHC is activated if ptp clock is registered in the kernel */ +bool ena_phc_is_active(struct ena_adapter *adapter) +{ + struct ena_phc_info *phc_info = adapter->phc_info; + + return (phc_info && phc_info->clock); +} + +static int ena_phc_register(struct ena_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ptp_clock_info *clock_info; + struct ena_phc_info *phc_info; + int rc = 0; + + phc_info = adapter->phc_info; + clock_info = &phc_info->clock_info; + + /* PHC may already be registered in case of a reset */ + if (ena_phc_is_active(adapter)) + return 0; + + phc_info->adapter = adapter; + + spin_lock_init(&phc_info->lock); + + /* Fill the ptp_clock_info struct and register PTP clock */ + *clock_info = ena_ptp_clock_info; + snprintf(clock_info->name, + sizeof(clock_info->name), + "ena-ptp-%02x", + PCI_SLOT(pdev->devfn)); + + phc_info->clock = ptp_clock_register(clock_info, &pdev->dev); + if (IS_ERR(phc_info->clock)) { + rc = PTR_ERR(phc_info->clock); + netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n", + rc); + phc_info->clock = NULL; + } + + return rc; +} + +static void ena_phc_unregister(struct ena_adapter *adapter) +{ + struct ena_phc_info *phc_info = adapter->phc_info; + + /* During reset flow, PHC must stay registered + * to keep kernel's PHC index + */ + if (ena_phc_is_active(adapter) && + !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { + ptp_clock_unregister(phc_info->clock); + phc_info->clock = NULL; + } +} + +int ena_phc_alloc(struct ena_adapter *adapter) +{
+ /* Allocate driver specific PHC info */ + adapter->phc_info = vzalloc(sizeof(*adapter->phc_info)); + if (unlikely(!adapter->phc_info)) { + netdev_err(adapter->netdev, "Failed to alloc phc_info\n"); + return -ENOMEM; + } + + return 0; +} + +void ena_phc_free(struct ena_adapter *adapter) +{ + if (adapter->phc_info) { + vfree(adapter->phc_info); + adapter->phc_info = NULL; + } +} + +int ena_phc_init(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct net_device *netdev = adapter->netdev; + int rc = -EOPNOTSUPP; + + /* Validate PHC feature is supported in the device */ + if (!ena_com_phc_supported(ena_dev)) { + netdev_dbg(netdev, "PHC feature is not supported by the device\n"); + goto err_ena_com_phc_init; + } + + /* Validate PHC feature is enabled by the kernel */ + if (!ena_phc_is_enabled(adapter)) { + netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n"); + goto err_ena_com_phc_init; + } + + /* Initialize device specific PHC info */ + rc = ena_com_phc_init(ena_dev); + if (unlikely(rc)) { + netdev_err(netdev, "Failed to init phc, error: %d\n", rc); + goto err_ena_com_phc_init; + } + + /* Configure PHC feature in driver and device */ + rc = ena_com_phc_config(ena_dev); + if (unlikely(rc)) { + netdev_err(netdev, "Failed to config phc, error: %d\n", rc); + goto err_ena_com_phc_config; + } + + /* Register to PTP class driver */ + rc = ena_phc_register(adapter); + if (unlikely(rc)) { + netdev_err(netdev, "Failed to register phc, error: %d\n", rc); + goto err_ena_com_phc_config; + } + + return 0; + +err_ena_com_phc_config: + ena_com_phc_destroy(ena_dev); +err_ena_com_phc_init: + ena_phc_enable(adapter, false); + ena_devlink_disable_phc_param(adapter->devlink); + return rc; +} + +void ena_phc_destroy(struct ena_adapter *adapter) +{ + ena_phc_unregister(adapter); + ena_com_phc_destroy(adapter->ena_dev); +} + +int ena_phc_get_index(struct ena_adapter *adapter) +{ + if (ena_phc_is_active(adapter)) + return ptp_clock_index(adapter->phc_info->clock); + + return -1; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h new file mode 100644 index 000000000000..7364fe714e44 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_phc.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved. 
+ */ + +#ifndef ENA_PHC_H +#define ENA_PHC_H + +#include <linux/ptp_clock_kernel.h> + +struct ena_phc_info { + /* PTP hardware capabilities */ + struct ptp_clock_info clock_info; + + /* Registered PTP clock device */ + struct ptp_clock *clock; + + /* Adapter specific private data structure */ + struct ena_adapter *adapter; + + /* PHC lock */ + spinlock_t lock; + + /* Enabled by kernel */ + bool enabled; +}; + +void ena_phc_enable(struct ena_adapter *adapter, bool enable); +bool ena_phc_is_enabled(struct ena_adapter *adapter); +bool ena_phc_is_active(struct ena_adapter *adapter); +int ena_phc_get_index(struct ena_adapter *adapter); +int ena_phc_init(struct ena_adapter *adapter); +void ena_phc_destroy(struct ena_adapter *adapter); +int ena_phc_alloc(struct ena_adapter *adapter); +void ena_phc_free(struct ena_adapter *adapter); + +#endif /* ENA_PHC_H */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index a2efebafd686..51068dc1cc2a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -53,6 +53,11 @@ enum ena_regs_reset_reason_types { #define ENA_REGS_MMIO_RESP_HI_OFF 0x64 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 +/* phc_registers offsets */ + +/* 100 base */ +#define ENA_REGS_PHC_DB_OFF 0x100 + /* version register */ #define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff #define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 @@ -129,4 +134,7 @@ enum ena_regs_reset_reason_types { #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 +/* phc_db_req_id register */ +#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff + #endif /* _ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 0d61b8580d72..a6ea477bce3c 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -605,10 +605,8 @@ next: bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read); - if (processed < budget) { - napi_complete_done(&intf->rx_napi, processed); + if (processed < budget && napi_complete_done(&intf->rx_napi, processed)) bcmasp_enable_rx_irq(intf, 1); - } return processed; } @@ -1281,6 +1279,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, ndev->hw_features |= ndev->features; ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload); + netdev_sw_irq_coalesce_default_on(ndev); + return intf; err_free_netdev: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 44199855ebfb..528ce9ca4f54 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3318,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev, return 0; } -static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +static int bnx2x_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) { + struct bnx2x *bp = netdev_priv(dev); + switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: @@ -3361,20 +3364,21 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = BNX2X_NUM_ETH_QUEUES(bp); return 0; - case ETHTOOL_GRXFH: - return bnx2x_get_rss_flags(bp, info); default: DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EOPNOTSUPP; } } -static int 
bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +static int bnx2x_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) { + struct bnx2x *bp = netdev_priv(dev); int udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, - "Set rss flags command parameters: flow type = %d, data = %llu\n", + "Set rss flags command parameters: flow type = %d, data = %u\n", info->flow_type, info->data); switch (info->flow_type) { @@ -3460,19 +3464,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) } } -static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) -{ - struct bnx2x *bp = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_SRXFH: - return bnx2x_set_rss_flags(bp, info); - default: - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } -} - static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) { return T_ETH_INDIRECTION_TABLE_SIZE; @@ -3684,10 +3675,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, - .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh = bnx2x_get_rxfh, .set_rxfh = bnx2x_set_rxfh, + .get_rxfh_fields = bnx2x_get_rxfh_fields, + .set_rxfh_fields = bnx2x_set_rxfh_fields, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, .get_module_info = bnx2x_get_module_info, @@ -3711,10 +3703,11 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .get_strings = bnx2x_get_strings, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, - .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh = bnx2x_get_rxfh, .set_rxfh = bnx2x_set_rxfh, + .get_rxfh_fields = bnx2x_get_rxfh_fields, + .set_rxfh_fields = bnx2x_set_rxfh_fields, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, .get_link_ksettings = bnx2x_get_vf_link_ksettings, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c9a1a1d504c0..3ee4b848ef53 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10219,8 +10219,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table) static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = { .sync_table = bnx2x_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 869580b6f70d..07e749fa1910 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1810,7 +1810,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) { struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); - /* if vf-rep dev is NULL, the must belongs to the PF */ + /* if vf-rep dev is NULL, it must belong to the PF */ return dev ? 
dev : bp->dev; } @@ -7116,7 +7116,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", ring_type); - return -1; + return -EINVAL; } resp = hwrm_req_hold(bp, req); @@ -10780,6 +10780,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, bp->num_rss_ctx--; } +static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int rxr_id) +{ + u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + int i, vnic_rx; + + /* Ntuple VNIC always has all the rx rings. Any change of ring id + * must be updated because a future filter may use it. + */ + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) + return true; + + for (i = 0; i < tbl_size; i++) { + if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; + else + vnic_rx = bp->rss_indir_tbl[i]; + + if (rxr_id == vnic_rx) + return true; + } + + return false; +} + +static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u16 mru, int rxr_id) +{ + int rc; + + if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id)) + return 0; + + if (mru) { + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + } + vnic->mru = mru; + bnxt_hwrm_vnic_update(bp, vnic, + VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + + return 0; +} + +static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + int rc; + + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id); + if (rc) + return rc; + } + + return 0; +} + static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) { bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); @@ -14055,28 +14121,13 @@ static void bnxt_unlock_sp(struct bnxt *bp) netdev_unlock(bp->dev); } -/* Same as bnxt_lock_sp() with additional rtnl_lock */ -static void bnxt_rtnl_lock_sp(struct bnxt *bp) -{ - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); - netdev_lock(bp->dev); -} - -static void bnxt_rtnl_unlock_sp(struct bnxt *bp) -{ - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - netdev_unlock(bp->dev); - rtnl_unlock(); -} - /* Only called from bnxt_sp_task() */ static void bnxt_reset(struct bnxt *bp, bool silent) { - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state)) bnxt_reset_task(bp, silent); - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } /* Only called from bnxt_sp_task() */ @@ -14084,9 +14135,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) { int i; - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); return; } /* Disable and flush TPA before resetting the RX ring */ @@ -14125,7 +14176,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) } if (bp->flags & BNXT_FLAG_TPA) bnxt_set_tpa(bp, true); - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } static void bnxt_fw_fatal_close(struct bnxt *bp) @@ -15017,17 +15068,15 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; fallthrough; case BNXT_FW_RESET_STATE_OPENING: - while (!rtnl_trylock()) { + while (!netdev_trylock(bp->dev)) { bnxt_queue_fw_reset_work(bp, HZ / 10); return; } - netdev_lock(bp->dev); rc = 
bnxt_open(bp->dev); if (rc) { netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); bnxt_fw_reset_abort(bp, rc); netdev_unlock(bp->dev); - rtnl_unlock(); goto ulp_start; } @@ -15047,7 +15096,6 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_dl_health_fw_status_update(bp, true); } netdev_unlock(bp->dev); - rtnl_unlock(); bnxt_ulp_start(bp, 0); bnxt_reenable_sriov(bp); netdev_lock(bp->dev); @@ -15573,8 +15621,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { .set_port = bnxt_udp_tunnel_set_port, .unset_port = bnxt_udp_tunnel_unset_port, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, @@ -15582,8 +15629,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { }, bnxt_udp_tunnels_p7 = { .set_port = bnxt_udp_tunnel_set_port, .unset_port = bnxt_udp_tunnel_unset_port, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, @@ -15927,6 +15973,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) struct bnxt_vnic_info *vnic; struct bnxt_napi *bnapi; int i, rc; + u16 mru; rxr = &bp->rx_ring[idx]; clone = qmem; @@ -15977,28 +16024,22 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; - rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", - vnic->vnic_id, rc); + rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx); + if (rc) return rc; - } - vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; - bnxt_hwrm_vnic_update(bp, vnic, - VNIC_UPDATE_REQ_ENABLES_MRU_VALID); } - - return 0; + return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx); err_reset: netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", rc); napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); - netif_close(dev); + bnxt_reset_task(bp, true); return rc; } @@ -16013,10 +16054,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; - vnic->mru = 0; - bnxt_hwrm_vnic_update(bp, vnic, - VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + + bnxt_set_vnic_mru_p5(bp, vnic, 0, idx); } + bnxt_set_rss_ctx_vnic_mru(bp, 0, idx); /* Make sure NAPI sees that the VNIC is disabled */ synchronize_net(); rxr = &bp->rx_ring[idx]; @@ -16814,7 +16855,6 @@ static int bnxt_resume(struct device *device) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rtnl_lock(); netdev_lock(dev); rc = pci_enable_device(bp->pdev); if (rc) { @@ -16859,7 +16899,6 @@ static int bnxt_resume(struct device *device) resume_exit: netdev_unlock(bp->dev); - rtnl_unlock(); bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); @@ -17025,7 +17064,6 @@ static void bnxt_io_resume(struct pci_dev *pdev) int err; netdev_info(bp->dev, "PCI Slot Resume\n"); - rtnl_lock(); netdev_lock(netdev); err = bnxt_hwrm_func_qcaps(bp); @@ -17043,7 +17081,6 @@ static void 
bnxt_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); netdev_unlock(netdev); - rtnl_unlock(); bnxt_ulp_start(bp, err); if (!err) bnxt_reenable_sriov(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f5d490bf997e..4c10373abffd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1587,8 +1587,11 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp) return 0; } -static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) +static int bnxt_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct bnxt *bp = netdev_priv(dev); + cmd->data = 0; switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -1647,10 +1650,15 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) -static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) +static int bnxt_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) { - u32 rss_hash_cfg = bp->rss_hash_cfg; + struct bnxt *bp = netdev_priv(dev); int tuple, rc = 0; + u32 rss_hash_cfg; + + rss_hash_cfg = bp->rss_hash_cfg; if (cmd->data == RXH_4TUPLE) tuple = 4; @@ -1768,10 +1776,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, rc = bnxt_grxclsrule(bp, cmd); break; - case ETHTOOL_GRXFH: - rc = bnxt_grxfh(bp, cmd); - break; - default: rc = -EOPNOTSUPP; break; @@ -1786,10 +1790,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) int rc; switch (cmd->cmd) { - case ETHTOOL_SRXFH: - rc = bnxt_srxfh(bp, cmd); - break; - case ETHTOOL_SRXCLSRLINS: rc = bnxt_srxclsrlins(bp, cmd); break; @@ -5521,6 +5521,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, .set_rxfh = bnxt_set_rxfh, + .get_rxfh_fields = bnxt_get_rxfh_fields, + .set_rxfh_fields = bnxt_set_rxfh_fields, .create_rxfh_context = bnxt_create_rxfh_context, .modify_rxfh_context = bnxt_modify_rxfh_context, .remove_rxfh_context = bnxt_remove_rxfh_context, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 5ddddd89052f..bc0d80356568 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -823,7 +823,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) int tx_ok = 0, rx_ok = 0, rss_ok = 0; int avail_cp, avail_stat; - /* Check if we can enable requested num of vf's. At a mininum + /* Check if we can enable requested num of vf's. At a minimum * we require 1 RX 1 TX rings for each VF. In this minimum conf * features like TPA will not be available. */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d2ca90407cce..0599d3016224 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1316,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, /* Check if there's another flow using the same tunnel decap. * If not, add this tunnel to the table and resolve the other - * tunnel header fileds. Ignore src_port in the tunnel_key, + * tunnel header fields. Ignore src_port in the tunnel_key, * since it is not required for decap filters. 
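
The bnx2x and bnxt hunks above follow the same conversion applied to ena earlier in this section: RSS hash-field handling moves out of the multiplexed {get,set}_rxnfc (ETHTOOL_GRXFH/ETHTOOL_SRXFH) callbacks into the dedicated get_rxfh_fields/set_rxfh_fields ethtool ops. A minimal sketch of the pattern (drv_* names are placeholders; signatures as in the hunks above):

static int drv_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *fields)
{
	fields->data = RXH_IP_SRC | RXH_IP_DST;	/* keyed on fields->flow_type */
	return 0;
}

static int drv_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *fields,
			       struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;	/* program the NIC hash configuration here */
}

static const struct ethtool_ops drv_ethtool_ops = {
	.get_rxfh_fields = drv_get_rxfh_fields,
	.set_rxfh_fields = drv_set_rxfh_fields,
};
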
*/ decap_key->tp_src = 0; @@ -1410,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, /* Check if there's another flow using the same tunnel encap. * If not, add this tunnel to the table and resolve the other - * tunnel header fileds + * tunnel header fields */ encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table, &tc_info->encap_ht_params, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 84c4812414fd..2450a369b792 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -231,10 +231,9 @@ void bnxt_ulp_stop(struct bnxt *bp) return; mutex_lock(&edev->en_dev_lock); - if (!bnxt_ulp_registered(edev)) { - mutex_unlock(&edev->en_dev_lock); - return; - } + if (!bnxt_ulp_registered(edev) || + (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) + goto ulp_stop_exit; edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; if (aux_priv) { @@ -250,6 +249,7 @@ void bnxt_ulp_stop(struct bnxt *bp) adrv->suspend(adev, pm); } } +ulp_stop_exit: mutex_unlock(&edev->en_dev_lock); } @@ -258,19 +258,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err) struct bnxt_aux_priv *aux_priv = bp->aux_priv; struct bnxt_en_dev *edev = bp->edev; - if (!edev) - return; - - edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; - - if (err) + if (!edev || err) return; mutex_lock(&edev->en_dev_lock); - if (!bnxt_ulp_registered(edev)) { - mutex_unlock(&edev->en_dev_lock); - return; - } + if (!bnxt_ulp_registered(edev) || + !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) + goto ulp_start_exit; if (edev->ulp_tbl->msix_requested) bnxt_fill_msix_vecs(bp, edev->msix_entries); @@ -287,6 +281,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err) adrv->resume(adev); } } +ulp_start_exit: + edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; mutex_unlock(&edev->en_dev_lock); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fa0077bc67b7..4f40f6afe88f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2472,10 +2472,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) work_done = bcmgenet_desc_rx(ring, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); + if (work_done < budget && napi_complete_done(napi, work_done)) bcmgenet_rx_ring_int_enable(ring); - } if (ring->dim.use_dim) { dim_update_sample(ring->dim.event_ctr, ring->dim.packets, @@ -3988,6 +3986,8 @@ static int bcmgenet_probe(struct platform_device *pdev) dev->hw_features |= dev->features; dev->vlan_features |= dev->features; + netdev_sw_irq_coalesce_default_on(dev); + /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = true; if (priv->wol_irq > 0) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index b6437ba7a2eb..573e8b279e52 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -169,10 +169,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) reg &= ~EXT_GPHY_RESET; } else { + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | - EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR; + EXT_CFG_IDDQ_GLOBAL_PWR; bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); mdelay(1); + reg |= EXT_CK25_DIS; } bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); diff --git 
a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d1f1ae5ea161..53aaf6b08e39 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5654,6 +5654,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) return 0; } +static void macb_shutdown(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + rtnl_lock(); + + if (netif_running(netdev)) + dev_close(netdev); + + netif_device_detach(netdev); + + rtnl_unlock(); +} + static const struct dev_pm_ops macb_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume) SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL) @@ -5667,6 +5681,7 @@ static struct platform_driver macb_driver = { .of_match_table = of_match_ptr(macb_dt_ids), .pm = &macb_pm_ops, }, + .shutdown = macb_shutdown, }; module_platform_driver(macb_driver); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index ff8f2f9f9cae..75f22f74774c 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1208,45 +1208,6 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) } EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device); -int validate_cn23xx_pf_config_info(struct octeon_device *oct, - struct octeon_config *conf23xx) -{ - if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) { - dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", - __func__, CFG_GET_IQ_MAX_Q(conf23xx), - CN23XX_MAX_INPUT_QUEUES); - return 1; - } - - if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) { - dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n", - __func__, CFG_GET_OQ_MAX_Q(conf23xx), - CN23XX_MAX_OUTPUT_QUEUES); - return 1; - } - - if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR && - CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) { - dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", - __func__); - return 1; - } - - if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) { - dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", - __func__); - return 1; - } - - if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) { - dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", - __func__); - return 1; - } - - return 0; -} - int cn23xx_fw_loaded(struct octeon_device *oct) { u64 val; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 234b96b4f488..bbe9f3133b07 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -54,9 +54,6 @@ struct oct_vf_stats { int setup_cn23xx_octeon_pf_device(struct octeon_device *oct); -int validate_cn23xx_pf_config_info(struct octeon_device *oct, - struct octeon_config *conf23xx); - u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); int cn23xx_sriov_config(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index d0ff0c170b1a..fc6053414b7d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev, return 0; } -static int nicvf_get_rss_hash_opts(struct nicvf *nic, - struct ethtool_rxnfc *info) +static 
int nicvf_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) { info->data = 0; @@ -552,25 +552,28 @@ static int nicvf_get_rxnfc(struct net_device *dev, info->data = nic->rx_queues; ret = 0; break; - case ETHTOOL_GRXFH: - return nicvf_get_rss_hash_opts(nic, info); default: break; } return ret; } -static int nicvf_set_rss_hash_opts(struct nicvf *nic, - struct ethtool_rxnfc *info) +static int nicvf_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) { - struct nicvf_rss_info *rss = &nic->rss_info; - u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); + struct nicvf *nic = netdev_priv(dev); + struct nicvf_rss_info *rss; + u64 rss_cfg; + + rss = &nic->rss_info; + rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); if (!rss->enable) netdev_err(nic->netdev, "RSS is disabled, hash cannot be set\n"); - netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n", + netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n", info->flow_type, info->data); if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST)) @@ -628,19 +631,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic, return 0; } -static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) -{ - struct nicvf *nic = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_SRXFH: - return nicvf_set_rss_hash_opts(nic, info); - default: - break; - } - return -EOPNOTSUPP; -} - static u32 nicvf_get_rxfh_key_size(struct net_device *netdev) { return RSS_HASH_KEY_SIZE * sizeof(u64); @@ -872,11 +862,12 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_ringparam = nicvf_get_ringparam, .set_ringparam = nicvf_set_ringparam, .get_rxnfc = nicvf_get_rxnfc, - .set_rxnfc = nicvf_set_rxnfc, .get_rxfh_key_size = nicvf_get_rxfh_key_size, .get_rxfh_indir_size = nicvf_get_rxfh_indir_size, .get_rxfh = nicvf_get_rxfh, .set_rxfh = nicvf_set_rxfh, + .get_rxfh_fields = nicvf_get_rxfh_fields, + .set_rxfh_fields = nicvf_set_rxfh_fields, .get_channels = nicvf_get_channels, .set_channels = nicvf_set_channels, .get_pauseparam = nicvf_get_pauseparam, diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 9749d1239f58..5d5f3380ecca 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -176,43 +176,6 @@ again: EXPORT_SYMBOL(t3_l2t_send_slow); -void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e) -{ -again: - switch (e->state) { - case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ - neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_STALE) { - e->state = L2T_STATE_VALID; - } - spin_unlock_bh(&e->lock); - return; - case L2T_STATE_VALID: /* fast-path, send the packet on */ - return; - case L2T_STATE_RESOLVING: - spin_lock_bh(&e->lock); - if (e->state != L2T_STATE_RESOLVING) { - /* ARP already completed */ - spin_unlock_bh(&e->lock); - goto again; - } - spin_unlock_bh(&e->lock); - - /* - * Only the first packet added to the arpq should kick off - * resolution. However, because the alloc_skb below can fail, - * we allow each packet added to the arpq to retry resolution - * as a way of recovering from transient memory exhaustion. - * A better way would be to use a work request to retry L2T - * entries when there's no memory. - */ - neigh_event_send(e->neigh, NULL); - } -} - -EXPORT_SYMBOL(t3_l2t_send_event); - /* * Allocate a free L2T entry. Must be called with l2t_data.lock held. 
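
The nicvf conversion above keeps the driver's validation logic intact: an RSS hash tuple is a bitmask of RXH_* header-field flags, and hashing on both IP addresses is mandatory before any L4 port bits may be added. A small standalone sketch of that check, assuming the same 2-tuple/4-tuple convention the bnxt code defines earlier in this diff:

	#include <linux/errno.h>
	#include <linux/ethtool.h>

	#define DRV_RXH_2TUPLE	(RXH_IP_SRC | RXH_IP_DST)
	#define DRV_RXH_4TUPLE	(DRV_RXH_2TUPLE | RXH_L4_B_0_1 | RXH_L4_B_2_3)

	static int drv_check_hash_fields(u32 data)
	{
		/* Source and destination IP must both be hashed ... */
		if ((data & DRV_RXH_2TUPLE) != DRV_RXH_2TUPLE)
			return -EINVAL;
		/* ... and the two L4 port words are all-or-nothing. */
		if (data != DRV_RXH_2TUPLE && data != DRV_RXH_4TUPLE)
			return -EINVAL;
		return 0;
	}
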
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index 646ca0bc25bd..33558f177497 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, struct net_device *dev, const void *daddr); int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, struct l2t_entry *e); -void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 1546c3db08f0..23326235d4ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1730,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev, return 0; } +static int cxgb4_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) +{ + const struct port_info *pi = netdev_priv(dev); + unsigned int v = pi->rss_mode; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + } + return 0; +} + static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { @@ -1739,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, int ret = 0; switch (info->cmd) { - case ETHTOOL_GRXFH: { - unsigned int v = pi->rss_mode; - - info->data = 0; - switch (info->flow_type) { - case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case IPV4_FLOW: - if (v & 
FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - } - return 0; - } case ETHTOOL_GRXRINGS: info->data = pi->nqsets; return 0; @@ -2199,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, .set_rxfh = set_rss_table, + .get_rxfh_fields = cxgb4_get_rxfh_fields, .self_test = cxgb4_self_test, .flash_device = set_flash, .get_ts_info = get_ts_info, diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 529160926a96..a50f5dad34d5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -528,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) return 0; } -static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd) +static int enic_get_rx_flow_hash(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct enic *enic = netdev_priv(dev); u8 rss_hash_type = 0; cmd->data = 0; @@ -597,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = enic_grxclsrule(enic, cmd); spin_unlock_bh(&enic->rfs_h.lock); break; - case ETHTOOL_GRXFH: - ret = enic_get_rx_flow_hash(enic, cmd); - break; default: ret = -EOPNOTSUPP; break; @@ -693,6 +692,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_rxfh_key_size = enic_get_rxfh_key_size, .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, + .get_rxfh_fields = enic_get_rx_flow_hash, .get_link_ksettings = enic_get_ksettings, .get_ts_info = enic_get_ts_info, .get_channels = enic_get_channels, diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index da9b7715df05..cc60ee454bf9 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = { .ndo_tx_timeout = rio_tx_timeout, }; +static bool is_support_rmon_mmio(struct pci_dev *pdev) +{ + return pdev->vendor == PCI_VENDOR_ID_DLINK && + pdev->device == 0x4000 && + pdev->revision == 0x0c; +} + static int rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -131,18 +138,22 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np = netdev_priv(dev); + if (is_support_rmon_mmio(pdev)) + np->rmon_enable = true; + /* IO registers range. */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) goto err_out_dev; np->eeprom_addr = ioaddr; -#ifdef MEM_MAPPING - /* MM registers range. */ - ioaddr = pci_iomap(pdev, 1, 0); - if (!ioaddr) - goto err_out_iounmap; -#endif + if (np->rmon_enable) { + /* MM registers range. 
*/ + ioaddr = pci_iomap(pdev, 1, 0); + if (!ioaddr) + goto err_out_iounmap; + } + np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; @@ -289,9 +300,8 @@ err_out_unmap_tx: dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_iounmap: -#ifdef MEM_MAPPING - pci_iounmap(pdev, np->ioaddr); -#endif + if (np->rmon_enable) + pci_iounmap(pdev, np->ioaddr); pci_iounmap(pdev, np->eeprom_addr); err_out_dev: free_netdev (dev); @@ -578,7 +588,8 @@ static void rio_hw_init(struct net_device *dev) dw8(TxDMAPollPeriod, 0xff); dw8(RxDMABurstThresh, 0x30); dw8(RxDMAUrgentThresh, 0x30); - dw32(RmonStatMask, 0x0007ffff); + if (!np->rmon_enable) + dw32(RmonStatMask, 0x0007ffff); /* clear statistics */ clear_stats (dev); @@ -1076,9 +1087,6 @@ get_stats (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; -#ifdef MEM_MAPPING - int i; -#endif unsigned int stat_reg; unsigned long flags; @@ -1123,10 +1131,10 @@ get_stats (struct net_device *dev) dr16(MacControlFramesXmtd); dr16(FramesWEXDeferal); -#ifdef MEM_MAPPING - for (i = 0x100; i <= 0x150; i += 4) - dr32(i); -#endif + if (np->rmon_enable) + for (int i = 0x100; i <= 0x150; i += 4) + dr32(i); + dr16(TxJumboFrames); dr16(RxJumboFrames); dr16(TCPCheckSumErrors); @@ -1143,9 +1151,6 @@ clear_stats (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; -#ifdef MEM_MAPPING - int i; -#endif /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ @@ -1181,10 +1186,9 @@ clear_stats (struct net_device *dev) dr16(BcstFramesXmtdOk); dr16(MacControlFramesXmtd); dr16(FramesWEXDeferal); -#ifdef MEM_MAPPING - for (i = 0x100; i <= 0x150; i += 4) - dr32(i); -#endif + if (np->rmon_enable) + for (int i = 0x100; i <= 0x150; i += 4) + dr32(i); dr16(TxJumboFrames); dr16(RxJumboFrames); dr16(TCPCheckSumErrors); @@ -1810,9 +1814,8 @@ rio_remove1 (struct pci_dev *pdev) np->rx_ring_dma); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); -#ifdef MEM_MAPPING - pci_iounmap(pdev, np->ioaddr); -#endif + if (np->rmon_enable) + pci_iounmap(pdev, np->ioaddr); pci_iounmap(pdev, np->eeprom_addr); free_netdev (dev); pci_release_regions (pdev); diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index ba679025e866..4788cc94639d 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -403,6 +403,8 @@ struct netdev_private { u16 negotiate; /* Negotiated media */ int phy_addr; /* PHY addresses. */ u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */ + + bool rmon_enable; }; /* The station address location in the EEPROM. 
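
The dl2k change above is a compile-time-to-runtime conversion: every "#ifdef MEM_MAPPING" block becomes an ordinary branch on the new np->rmon_enable flag, computed once at probe time from the PCI identity of the device, so one binary serves all board variants. The shape of the pattern, condensed from the hunks above:

	#include <linux/pci.h>

	/* Only the D-Link 0x4000 rev 0x0c part exposes the memory-mapped
	 * RMON statistics window (IDs taken from the hunk above).
	 */
	static bool is_support_rmon_mmio(struct pci_dev *pdev)
	{
		return pdev->vendor == PCI_VENDOR_ID_DLINK &&
		       pdev->device == 0x4000 &&
		       pdev->revision == 0x0c;
	}

	/* At probe: np->rmon_enable = is_support_rmon_mmio(pdev); afterwards,
	 * BAR 1 is mapped and the RMON register loops run only when the flag
	 * is set, replacing the old conditional compilation.
	 */
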
*/ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3d2e21592119..f49400ba9729 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4031,8 +4031,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table, static const struct udp_tunnel_nic_info be_udp_tunnels = { .set_port = be_vxlan_set_port, .unset_port = be_vxlan_unset_port, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index c699bd6bcbb9..474073c7f94d 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -31,6 +31,7 @@ config FTGMAC100 depends on ARM || COMPILE_TEST depends on !64BIT || BROKEN select PHYLIB + select FIXED_PHY select MDIO_ASPEED if MACH_ASPEED_G6 select CRC32 help diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 23c23cca2620..3edc8d142dd5 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -28,7 +28,6 @@ #include <linux/percpu.h> #include <linux/dma-mapping.h> #include <linux/sort.h> -#include <linux/phy_fixed.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <soc/fsl/bman.h> @@ -3150,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = { .ndo_stop = dpaa_eth_stop, .ndo_tx_timeout = dpaa_tx_timeout, .ndo_get_stats64 = dpaa_get_stats64, - .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 9986f6e1f587..0c588e03b15e 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -263,8 +263,8 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, ethtool_puts(&data, dpaa_stats_global[i]); } -static int dpaa_get_hash_opts(struct net_device *dev, - struct ethtool_rxnfc *cmd) +static int dpaa_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { struct dpaa_priv *priv = netdev_priv(dev); @@ -299,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev, return 0; } -static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *unused) -{ - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXFH: - ret = dpaa_get_hash_opts(dev, cmd); - break; - default: - break; - } - - return ret; -} - static void dpaa_set_hash(struct net_device *net_dev, bool enable) { struct mac_device *mac_dev; @@ -329,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable) priv->keygen_in_use = enable; } -static int dpaa_set_hash_opts(struct net_device *dev, - struct ethtool_rxnfc *nfc) +static int dpaa_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { int ret = -EINVAL; @@ -364,21 +349,6 @@ static int dpaa_set_hash_opts(struct net_device *dev, return ret; } -static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = 
dpaa_set_hash_opts(dev, cmd); - break; - default: - break; - } - - return ret; -} - static int dpaa_get_ts_info(struct net_device *net_dev, struct kernel_ethtool_ts_info *info) { @@ -510,8 +480,8 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_strings = dpaa_get_strings, .get_link_ksettings = dpaa_get_link_ksettings, .set_link_ksettings = dpaa_set_link_ksettings, - .get_rxnfc = dpaa_get_rxnfc, - .set_rxnfc = dpaa_set_rxnfc, + .get_rxfh_fields = dpaa_get_rxfh_fields, + .set_rxfh_fields = dpaa_set_rxfh_fields, .get_ts_info = dpaa_get_ts_info, .get_coalesce = dpaa_get_coalesce, .set_coalesce = dpaa_set_coalesce, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 74ef77cb7078..00474ed11d53 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -719,13 +719,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, int i, j = 0; switch (rxnfc->cmd) { - case ETHTOOL_GRXFH: - /* we purposely ignore cmd->flow_type for now, because the - * classifier only supports a single set of fields for all - * protocols - */ - rxnfc->data = priv->rx_hash_fields; - break; case ETHTOOL_GRXRINGS: rxnfc->data = dpaa2_eth_queue_count(priv); break; @@ -767,11 +760,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, int err = 0; switch (rxnfc->cmd) { - case ETHTOOL_SRXFH: - if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data) - return -EOPNOTSUPP; - err = dpaa2_eth_set_hash(net_dev, rxnfc->data); - break; case ETHTOOL_SRXCLSRLINS: err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location); break; @@ -785,6 +773,28 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, return err; } +static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev, + struct ethtool_rxfh_fields *rxnfc) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + /* we purposely ignore cmd->flow_type for now, because the + * classifier only supports a single set of fields for all + * protocols + */ + rxnfc->data = priv->rx_hash_fields; + return 0; +} + +static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev, + const struct ethtool_rxfh_fields *rxnfc, + struct netlink_ext_ack *extack) +{ + if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data) + return -EOPNOTSUPP; + return dpaa2_eth_set_hash(net_dev, rxnfc->data); +} + int dpaa2_phc_index = -1; EXPORT_SYMBOL(dpaa2_phc_index); @@ -939,6 +949,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_strings = dpaa2_eth_get_strings, .get_rxnfc = dpaa2_eth_get_rxnfc, .set_rxnfc = dpaa2_eth_set_rxnfc, + .get_rxfh_fields = dpaa2_eth_get_rxfh_fields, + .set_rxfh_fields = dpaa2_eth_set_rxfh_fields, .get_ts_info = dpaa2_eth_get_ts_info, .get_tunable = dpaa2_eth_get_tunable, .set_tunable = dpaa2_eth_set_tunable, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index dcc3fbac3481..e4287725832e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1375,6 +1375,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, } if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { + struct enetc_hw *hw = &priv->si->hw; __be16 tpid = 0; switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { @@ -1385,15 +1386,12 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, tpid = htons(ETH_P_8021AD); break; case 2: - tpid = htons(enetc_port_rd(&priv->si->hw, - ENETC_PCVLANR1)); + 
tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) & + SICVLANR_ETYPE); break; case 3: - tpid = htons(enetc_port_rd(&priv->si->hw, - ENETC_PCVLANR2)); - break; - default: - break; + tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) & + SICVLANR_ETYPE); } __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index d38cd36be4a6..2e5cef646741 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -467,7 +467,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev, #define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ RXH_IP_DST) #define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) -static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) +static int enetc_get_rxfh_fields(struct net_device *netdev, + struct ethtool_rxfh_fields *rxnfc) { static const u32 rsshash[] = { [TCP_V4_FLOW] = ENETC_RSSHASH_L4, @@ -584,9 +585,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, case ETHTOOL_GRXRINGS: rxnfc->data = priv->num_rx_rings; break; - case ETHTOOL_GRXFH: - /* get RSS hash config */ - return enetc_get_rsshash(rxnfc); case ETHTOOL_GRXCLSRLCNT: /* total number of entries */ rxnfc->data = priv->si->num_fs_entries; @@ -639,8 +637,6 @@ static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc case ETHTOOL_GRXRINGS: rxnfc->data = priv->num_rx_rings; break; - case ETHTOOL_GRXFH: - return enetc_get_rsshash(rxnfc); default: return -EOPNOTSUPP; } @@ -1228,6 +1224,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = { .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, .set_rxfh = enetc_set_rxfh, + .get_rxfh_fields = enetc_get_rxfh_fields, .get_ringparam = enetc_get_ringparam, .get_coalesce = enetc_get_coalesce, .set_coalesce = enetc_set_coalesce, @@ -1258,6 +1255,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = { .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, .set_rxfh = enetc_set_rxfh, + .get_rxfh_fields = enetc_get_rxfh_fields, .get_ringparam = enetc_get_ringparam, .get_coalesce = enetc_get_coalesce, .set_coalesce = enetc_set_coalesce, @@ -1284,6 +1282,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = { .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, .set_rxfh = enetc_set_rxfh, + .get_rxfh_fields = enetc_get_rxfh_fields, }; void enetc_set_ethtool_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 4098f01479bc..cb26f185f52f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -43,6 +43,9 @@ #define ENETC_SIPMAR0 0x80 #define ENETC_SIPMAR1 0x84 +#define ENETC_SICVLANR1 0x90 +#define ENETC_SICVLANR2 0x94 +#define SICVLANR_ETYPE GENMASK(15, 0) /* VF-PF Message passing */ #define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */ diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index c81f2ea588f2..5c8fdcef759b 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -14,14 +14,14 @@ #define FEC_H /****************************************************************************/ +#include <dt-bindings/firmware/imx/rsrc.h> +#include <linux/bpf.h> #include <linux/clocksource.h> +#include 
<linux/firmware/imx/sci.h> #include <linux/net_tstamp.h> #include <linux/pm_qos.h> -#include <linux/bpf.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> -#include <dt-bindings/firmware/imx/rsrc.h> -#include <linux/firmware/imx/sci.h> #include <net/xdp.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ @@ -115,7 +115,7 @@ #define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ #define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ #define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ -#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */ #define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ #define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ #define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ @@ -342,7 +342,7 @@ struct bufdesc_ex { #define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20) /* The number of Tx and Rx buffers. These are allocated from the page - * pool. The code may assume these are power of two, so it it best + * pool. The code may assume these are power of two, so it is best * to keep them that size. * We don't need to allocate pages for the transmitter. We just use * the skbuffer directly. @@ -460,7 +460,7 @@ struct bufdesc_ex { #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) -/* Controller supports interrupt coalesc */ +/* Controller supports interrupt coalesce */ #define FEC_QUIRK_HAS_COALESCE (1 << 13) /* Interrupt doesn't wake CPU from deep idle */ #define FEC_QUIRK_ERR006687 (1 << 14) @@ -495,7 +495,7 @@ struct bufdesc_ex { */ #define FEC_QUIRK_HAS_EEE (1 << 20) -/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC +/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC * as an alternative option to make sure it works well with various PHYs. * For the implementation of delayed clock, ENET takes synchronized 250MHz * clocks to generate 2ns delay. @@ -614,7 +614,6 @@ struct fec_enet_private { unsigned int num_tx_queues; unsigned int num_rx_queues; - /* The saved address of a sent-in-place packet/buffer, for skfree(). */ struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS]; struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS]; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 17e9bddb9ddd..63dac4272045 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -22,56 +22,55 @@ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 
*/ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/pm_runtime.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <linux/cacheflush.h> +#include <linux/clk.h> +#include <linux/crc32.h> #include <linux/delay.h> -#include <linux/netdevice.h> +#include <linux/errno.h> #include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <net/ip.h> -#include <net/page_pool/helpers.h> -#include <net/selftests.h> -#include <net/tso.h> -#include <linux/tcp.h> -#include <linux/udp.h> +#include <linux/fec.h> +#include <linux/filter.h> +#include <linux/gpio/consumer.h> #include <linux/icmp.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <linux/in.h> +#include <linux/interrupt.h> #include <linux/io.h> +#include <linux/ioport.h> +#include <linux/ip.h> #include <linux/irq.h> -#include <linux/clk.h> -#include <linux/crc32.h> -#include <linux/platform_device.h> -#include <linux/property.h> +#include <linux/kernel.h> #include <linux/mdio.h> -#include <linux/phy.h> -#include <linux/fec.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/regulator/consumer.h> -#include <linux/if_vlan.h> +#include <linux/phy.h> #include <linux/pinctrl/consumer.h> -#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/prefetch.h> -#include <linux/mfd/syscon.h> +#include <linux/property.h> +#include <linux/ptrace.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/workqueue.h> +#include <net/ip.h> +#include <net/page_pool/helpers.h> +#include <net/selftests.h> +#include <net/tso.h> #include <soc/imx/cpuidle.h> -#include <linux/filter.h> -#include <linux/bpf.h> -#include <linux/bpf_trace.h> - -#include <asm/cacheflush.h> #include "fec.h" @@ -131,7 +130,7 @@ static const struct fec_devinfo fec_mvf600_info = { FEC_QUIRK_HAS_MDIO_C45, }; -static const struct fec_devinfo fec_imx6x_info = { +static const struct fec_devinfo fec_imx6sx_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | @@ -196,7 +195,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, @@ -276,6 +275,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_ECR_MAGICEN BIT(2) #define FEC_ECR_SLEEP BIT(3) #define FEC_ECR_EN1588 BIT(4) +#define FEC_ECR_SPEED BIT(5) #define FEC_ECR_BYTESWP BIT(8) /* FEC RCR bits definition */ #define FEC_RCR_LOOP 
BIT(0) @@ -1207,7 +1207,7 @@ fec_restart(struct net_device *ndev) /* 1G, 100M or 10M */ if (ndev->phydev) { if (ndev->phydev->speed == SPEED_1000) - ecntl |= (1 << 5); + ecntl |= FEC_ECR_SPEED; else if (ndev->phydev->speed == SPEED_100) rcntl &= ~FEC_RCR_10BASET; else @@ -1706,13 +1706,29 @@ xdp_err: return ret; } +static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb) +{ + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { + const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb); + const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI); + + /* Push and remove the vlan tag */ + + memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } +} + /* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. */ static int -fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) +fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) { struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_priv_rx_q *rxq; @@ -1720,11 +1736,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) unsigned short status; struct sk_buff *skb; ushort pkt_len; - __u8 *data; int pkt_received = 0; struct bufdesc_ex *ebdp = NULL; - bool vlan_packet_rcvd = false; - u16 vlan_tag; int index = 0; bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); @@ -1843,10 +1856,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) skb_mark_for_recycle(skb); if (unlikely(need_swap)) { + u8 *data; + data = page_address(page) + FEC_ENET_XDP_HEADROOM; swap_buffer(data, pkt_len); } - data = skb->data; /* Extract the enhanced buffer descriptor */ ebdp = NULL; @@ -1854,20 +1868,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ebdp = (struct bufdesc_ex *)bdp; /* If this is a VLAN packet remove the VLAN Tag */ - vlan_packet_rcvd = false; - if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && - (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { - /* Push and remove the vlan tag */ - struct vlan_hdr *vlan_header = - (struct vlan_hdr *) (data + ETH_HLEN); - vlan_tag = ntohs(vlan_header->h_vlan_TCI); - - vlan_packet_rcvd = true; - - memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); - skb_pull(skb, VLAN_HLEN); - } + if (fep->bufdesc_ex && + (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) + fec_enet_rx_vlan(ndev, skb); skb->protocol = eth_type_trans(skb, ndev); @@ -1886,12 +1889,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) } } - /* Handle received VLAN packets */ - if (vlan_packet_rcvd) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); @@ -1939,7 +1936,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget) /* Make sure that AVB queues are processed first. 
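
The new fec_enet_rx_vlan() helper above centralizes the in-place 802.1Q untagging that the receive loop used to open-code. The trick: the 4-byte tag sits between the two 6-byte MAC addresses and the EtherType, so sliding the 12 address bytes forward by VLAN_HLEN and pulling the head leaves an untagged frame, while the TCI is delivered to the stack out of band. A standalone sketch of the same technique:

	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void drv_strip_rx_vlan(struct sk_buff *skb)
	{
		const struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
		u16 tci = ntohs(veth->h_vlan_TCI);

		/* Slide dst+src MAC over the tag, then drop the 4 stale bytes. */
		memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);

		/* Hand the TCI to the stack via the hwaccel field. */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}
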
*/ for (i = fep->num_rx_queues - 1; i >= 0; i--) - done += fec_enet_rx_queue(ndev, budget - done, i); + done += fec_enet_rx_queue(ndev, i, budget - done); return done; } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 2bfaf14f65c8..3fc29afc9854 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev) out_be32(&fec->rfifo_alarm, 0x0000030c); out_be32(&fec->tfifo_alarm, 0x00000100); - /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */ + /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B); /* enable crc generation */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 876d90832596..afe162c9eed8 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -7,30 +7,30 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> +#include <linux/bitops.h> +#include <linux/clk.h> #include <linux/delay.h> -#include <linux/netdevice.h> +#include <linux/errno.h> #include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/bitops.h> +#include <linux/fec.h> +#include <linux/interrupt.h> #include <linux/io.h> +#include <linux/ioport.h> #include <linux/irq.h> -#include <linux/clk.h> -#include <linux/platform_device.h> -#include <linux/phy.h> -#include <linux/fec.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_net.h> +#include <linux/pci.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/ptrace.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/workqueue.h> #include "fec.h" @@ -117,7 +117,7 @@ static u64 fec_ptp_read(const struct cyclecounter *cc) * @fep: the fec_enet_private structure handle * @enable: enable the channel pps output * - * This function enble the PPS ouput on the timer channel. + * This function enables the PPS output on the timer channel. */ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) { @@ -172,7 +172,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) * very close to the second point, which means NSEC_PER_SEC * - ts.tv_nsec is close to be zero(For example 20ns); Since the timer * is still running when we calculate the first compare event, it is - * possible that the remaining nanoseonds run out before the compare + * possible that the remaining nanoseconds run out before the compare * counter is calculated and written into TCCR register. To avoid * this possibility, we will set the compare event to be the next * of next second. 
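
The fec_enet_rx_queue() parameter reorder above (queue id before budget) leaves the polling strategy untouched: queues are still walked from the highest index down so the AVB (priority) queues are serviced before best-effort queue 0, each call receiving whatever budget remains. Condensed, with hypothetical drv_* helpers standing in for the driver internals:

	static int drv_rx_queue(struct net_device *ndev, u16 queue_id, int budget);
	static int drv_num_rx_queues(struct net_device *ndev);

	static int drv_rx(struct net_device *ndev, int budget)
	{
		int done = 0;
		int i;

		/* Highest-numbered (AVB) queues first; pass the leftover budget. */
		for (i = drv_num_rx_queues(ndev) - 1; i >= 0; i--)
			done += drv_rx_queue(ndev, i, budget - done);

		return done;
	}
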
The current setting is 31-bit timer and wrap diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 3925441143fa..0291093f2e4e 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1225,7 +1225,7 @@ int memac_initialization(struct mac_device *mac_dev, * be careful and not enable this if we are using MII or RGMII, since * those configurations modes don't use in-band autonegotiation. */ - if (!of_property_read_bool(mac_node, "managed") && + if (!of_property_present(mac_node, "managed") && mac_dev->phy_if != PHY_INTERFACE_MODE_MII && !phy_interface_mode_is_rgmii(mac_dev->phy_if)) mac_dev->phylink_config.default_an_inband = true; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index bcbcad613512..7c0f049f0938 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -97,6 +97,7 @@ #include <linux/phy_fixed.h> #include <linux/of.h> #include <linux/of_net.h> +#include <linux/property.h> #include "gianfar.h" @@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np, return 0; } -static int gfar_of_group_count(struct device_node *np) -{ - struct device_node *child; - int num = 0; - - for_each_available_child_of_node(np, child) - if (of_node_name_eq(child, "queue-group")) - num++; - - return num; -} - /* Reads the controller's registers to determine what interface * connects it to the PHY. */ @@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_rx_qs = 1; } else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ - unsigned int num_grps = gfar_of_group_count(np); + unsigned int num_grps; + num_grps = device_get_named_child_node_count(&ofdev->dev, + "queue-group"); if (num_grps == 0 || num_grps > MAXGROUPS) { dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", num_grps); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 781d92e703cb..28f53cf2a174 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -781,14 +781,26 @@ err: return ret; } -static int gfar_set_hash_opts(struct gfar_private *priv, - struct ethtool_rxnfc *cmd) +static int gfar_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) { + struct gfar_private *priv = netdev_priv(dev); + int ret; + + if (test_bit(GFAR_RESETTING, &priv->state)) + return -EBUSY; + + mutex_lock(&priv->rx_queue_access); + + ret = 0; /* write the filer rules here */ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) - return -EINVAL; + ret = -EINVAL; - return 0; + mutex_unlock(&priv->rx_queue_access); + + return ret; } static int gfar_check_filer_hardware(struct gfar_private *priv) @@ -1398,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) mutex_lock(&priv->rx_queue_access); switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = gfar_set_hash_opts(priv, cmd); - break; case ETHTOOL_SRXCLSRLINS: if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && cmd->fs.ring_cookie >= priv->num_rx_queues) || @@ -1508,6 +1517,7 @@ const struct ethtool_ops gfar_ethtool_ops = { #endif .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, + .set_rxfh_fields = gfar_set_rxfh_fields, .get_ts_info = gfar_get_ts_info, .get_link_ksettings = 
phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig index 564862a57124..14c9431e15e5 100644 --- a/drivers/net/ethernet/google/Kconfig +++ b/drivers/net/ethernet/google/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE config GVE tristate "Google Virtual NIC (gVNIC) support" depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN)) + depends on PTP_1588_CLOCK_OPTIONAL select PAGE_POOL help This driver supports Google Virtual NIC (gVNIC)" diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile index 4520f1c07a63..e0ec227a50f7 100644 --- a/drivers/net/ethernet/google/gve/Makefile +++ b/drivers/net/ethernet/google/gve/Makefile @@ -1,5 +1,7 @@ # Makefile for the Google virtual Ethernet (gve) driver obj-$(CONFIG_GVE) += gve.o -gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \ +gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \ gve_buffer_mgmt_dqo.o + +gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 2fab38c8ee78..4469442d4940 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -11,7 +11,9 @@ #include <linux/dmapool.h> #include <linux/ethtool_netlink.h> #include <linux/netdevice.h> +#include <linux/net_tstamp.h> #include <linux/pci.h> +#include <linux/ptp_clock_kernel.h> #include <linux/u64_stats_sync.h> #include <net/page_pool/helpers.h> #include <net/xdp.h> @@ -750,6 +752,12 @@ struct gve_rss_config { u32 *hash_lut; }; +struct gve_ptp { + struct ptp_clock_info info; + struct ptp_clock *clock; + struct gve_priv *priv; +}; + struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ @@ -781,7 +789,7 @@ struct gve_priv { struct gve_tx_queue_config tx_cfg; struct gve_rx_queue_config rx_cfg; - u32 num_ntfy_blks; /* spilt between TX and RX so must be even */ + u32 num_ntfy_blks; /* split between TX and RX so must be even */ struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ __be32 __iomem *db_bar2; /* "array" of doorbells */ @@ -813,6 +821,7 @@ struct gve_priv { u32 adminq_set_driver_parameter_cnt; u32 adminq_report_stats_cnt; u32 adminq_report_link_speed_cnt; + u32 adminq_report_nic_timestamp_cnt; u32 adminq_get_ptype_map_cnt; u32 adminq_verify_driver_compatibility_cnt; u32 adminq_query_flow_rules_cnt; @@ -870,6 +879,14 @@ struct gve_priv { u16 rss_lut_size; bool cache_rss_config; struct gve_rss_config rss_config; + + /* True if the device supports reading the nic clock */ + bool nic_timestamp_supported; + struct gve_ptp *ptp; + struct kernel_hwtstamp_config ts_config; + struct gve_nic_ts_report *nic_ts_report; + dma_addr_t nic_ts_report_bus; + u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */ }; enum gve_service_task_flags_bit { @@ -1249,6 +1266,24 @@ int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); int gve_flow_rules_reset(struct gve_priv *priv); /* RSS config */ int gve_init_rss_config(struct gve_priv *priv, u16 num_queues); +/* PTP and timestamping */ +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int gve_clock_nic_ts_read(struct gve_priv *priv); +int gve_init_clock(struct gve_priv *priv); +void gve_teardown_clock(struct gve_priv *priv); +#else /* CONFIG_PTP_1588_CLOCK */ +static 
inline int gve_clock_nic_ts_read(struct gve_priv *priv) +{ + return -EOPNOTSUPP; +} + +static inline int gve_init_clock(struct gve_priv *priv) +{ + return 0; +} + +static inline void gve_teardown_clock(struct gve_priv *priv) { } +#endif /* CONFIG_PTP_1588_CLOCK */ /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); /* exported by ethtool.c */ diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 3e8fc33cc11f..4f33d094a2ef 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -46,6 +46,7 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_rss_config **dev_op_rss_config, + struct gve_device_option_nic_timestamp **dev_op_nic_timestamp, struct gve_device_option_modify_ring **dev_op_modify_ring) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); @@ -225,6 +226,23 @@ void gve_parse_device_option(struct gve_priv *priv, "RSS config"); *dev_op_rss_config = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_NIC_TIMESTAMP: + if (option_length < sizeof(**dev_op_nic_timestamp) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Nic Timestamp", + (int)sizeof(**dev_op_nic_timestamp), + GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_nic_timestamp)) + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Nic Timestamp"); + *dev_op_nic_timestamp = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * without doing anything. @@ -246,6 +264,7 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_rss_config **dev_op_rss_config, + struct gve_device_option_nic_timestamp **dev_op_nic_timestamp, struct gve_device_option_modify_ring **dev_op_modify_ring) { const int num_options = be16_to_cpu(descriptor->num_device_options); @@ -269,6 +288,7 @@ gve_process_device_options(struct gve_priv *priv, dev_op_dqo_rda, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, dev_op_flow_steering, dev_op_rss_config, + dev_op_nic_timestamp, dev_op_modify_ring); dev_opt = next_opt; } @@ -306,6 +326,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) priv->adminq_set_driver_parameter_cnt = 0; priv->adminq_report_stats_cnt = 0; priv->adminq_report_link_speed_cnt = 0; + priv->adminq_report_nic_timestamp_cnt = 0; priv->adminq_get_ptype_map_cnt = 0; priv->adminq_query_flow_rules_cnt = 0; priv->adminq_cfg_flow_rule_cnt = 0; @@ -442,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv) int tail, head; int i; + lockdep_assert_held(&priv->adminq_lock); + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); head = priv->adminq_prod_cnt; @@ -467,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv) return 0; } -/* This function is not threadsafe - the caller is responsible for any - * necessary locks. 
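
The gve adminq hunk above swaps a "this function is not threadsafe" comment for lockdep_assert_held(), which both documents and enforces the rule: with CONFIG_PROVE_LOCKING enabled, a caller that forgets to take priv->adminq_lock triggers a lockdep splat instead of racing silently. The pattern in isolation, around a hypothetical drv_priv:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	struct drv_priv {
		struct mutex adminq_lock;
		/* ... device state ... */
	};

	static int drv_adminq_issue_cmd(struct drv_priv *priv)
	{
		/* Compiles to nothing unless lockdep is enabled; then it
		 * verifies the current context actually holds the mutex.
		 */
		lockdep_assert_held(&priv->adminq_lock);

		/* ... format the command, ring the doorbell ... */
		return 0;
	}
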
- */ static int gve_adminq_issue_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig) { @@ -477,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, u32 opcode; u32 tail; + lockdep_assert_held(&priv->adminq_lock); + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); // Check if next command will overflow the buffer. @@ -544,6 +566,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_REPORT_LINK_SPEED: priv->adminq_report_link_speed_cnt++; break; + case GVE_ADMINQ_REPORT_NIC_TIMESTAMP: + priv->adminq_report_nic_timestamp_cnt++; + break; case GVE_ADMINQ_GET_PTYPE_MAP: priv->adminq_get_ptype_map_cnt++; break; @@ -564,6 +589,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); + return -EINVAL; } return 0; @@ -625,7 +651,7 @@ static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode, /* The device specifies that the management vector can either be the first irq * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to - * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then + * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then * the management vector is first. * * gve arranges the msix vectors so that the management vector is last. @@ -709,13 +735,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que int err; int i; + mutex_lock(&priv->adminq_lock); + for (i = start_id; i < start_id + num_queues; i++) { err = gve_adminq_create_tx_queue(priv, i); if (err) - return err; + goto out; } - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; } static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, @@ -788,13 +820,19 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues) int err; int i; + mutex_lock(&priv->adminq_lock); + for (i = 0; i < num_queues; i++) { err = gve_adminq_create_rx_queue(priv, i); if (err) - return err; + goto out; } - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; } static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index) @@ -820,13 +858,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu int err; int i; + mutex_lock(&priv->adminq_lock); + for (i = start_id; i < start_id + num_queues; i++) { err = gve_adminq_destroy_tx_queue(priv, i); if (err) - return err; + goto out; } - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; } static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd, @@ -861,13 +905,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) int err; int i; + mutex_lock(&priv->adminq_lock); + for (i = 0; i < num_queues; i++) { err = gve_adminq_destroy_rx_queue(priv, i); if (err) - return err; + goto out; } - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; } static void gve_set_default_desc_cnt(struct gve_priv *priv, @@ -904,6 +954,8 @@ static void gve_enable_supported_features(struct gve_priv *priv, *dev_op_flow_steering, const struct gve_device_option_rss_config *dev_op_rss_config, + const struct gve_device_option_nic_timestamp + 
*dev_op_nic_timestamp, const struct gve_device_option_modify_ring *dev_op_modify_ring) { @@ -980,10 +1032,15 @@ static void gve_enable_supported_features(struct gve_priv *priv, "RSS device option enabled with key size of %u, lut size of %u.\n", priv->rss_key_size, priv->rss_lut_size); } + + if (dev_op_nic_timestamp && + (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK)) + priv->nic_timestamp_supported = true; } int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL; struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; @@ -1024,6 +1081,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) &dev_op_buffer_sizes, &dev_op_flow_steering, &dev_op_rss_config, + &dev_op_nic_timestamp, &dev_op_modify_ring); if (err) goto free_device_descriptor; @@ -1088,7 +1146,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, dev_op_flow_steering, - dev_op_rss_config, dev_op_modify_ring); + dev_op_rss_config, dev_op_nic_timestamp, + dev_op_modify_ring); free_device_descriptor: dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); @@ -1200,6 +1259,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv) return err; } +int gve_adminq_report_nic_ts(struct gve_priv *priv, + dma_addr_t nic_ts_report_addr) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP); + cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) { + .nic_ts_report_len = + cpu_to_be64(sizeof(struct gve_nic_ts_report)), + .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr), + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, struct gve_ptype_lut *ptype_lut) { diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 228217458275..22a74b6aa17e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -27,6 +27,7 @@ enum gve_adminq_opcodes { GVE_ADMINQ_GET_PTYPE_MAP = 0xE, GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, GVE_ADMINQ_QUERY_FLOW_RULES = 0x10, + GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11, GVE_ADMINQ_QUERY_RSS = 0x12, /* For commands that are larger than 56 bytes */ @@ -174,6 +175,12 @@ struct gve_device_option_rss_config { static_assert(sizeof(struct gve_device_option_rss_config) == 8); +struct gve_device_option_nic_timestamp { + __be32 supported_features_mask; +}; + +static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -192,6 +199,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, + GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd, GVE_DEV_OPT_ID_RSS_CONFIG = 0xe, }; @@ -206,6 +214,7 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0, }; enum gve_sup_feature_mask { @@ -214,6 +223,7 @@ enum gve_sup_feature_mask { GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, GVE_SUP_FLOW_STEERING_MASK = 1 << 5, 
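(Editor's aside, not part of the patch.) Two related changes run through the gve adminq code above: gve_adminq_issue_cmd() now states its locking contract explicitly via lockdep_assert_held(&priv->adminq_lock), and the batched queue create/destroy helpers take the mutex once around the whole fill-then-kick sequence instead of returning early with the locking left implicit. A minimal sketch of that shape, where gve_adminq_batch_example() and gve_adminq_issue_one() are hypothetical names invented for illustration:

static int gve_adminq_batch_example(struct gve_priv *priv, int num_queues)
{
	int err = 0;
	int i;

	mutex_lock(&priv->adminq_lock);

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_issue_one(priv, i);	/* hypothetical helper */
		if (err)
			goto out;
	}

	/* Ring the doorbell once for the whole batch and wait. */
	err = gve_adminq_kick_and_wait(priv);

out:
	mutex_unlock(&priv->adminq_lock);
	return err;
}

Holding the lock across the batch keeps another caller from interleaving commands into a half-built ring, and the lockdep assertion turns silent misuse into a splat on lockdep-enabled kernels. Note also that the unknown-opcode arm of the stats switch now returns -EINVAL instead of falling through as success.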
GVE_SUP_RSS_CONFIG_MASK = 1 << 7, + GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -392,6 +402,21 @@ struct gve_adminq_report_link_speed { static_assert(sizeof(struct gve_adminq_report_link_speed) == 8); +struct gve_adminq_report_nic_ts { + __be64 nic_ts_report_len; + __be64 nic_ts_report_addr; +}; + +static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16); + +struct gve_nic_ts_report { + __be64 nic_timestamp; /* NIC clock in nanoseconds */ + __be64 reserved1; + __be64 reserved2; + __be64 reserved3; + __be64 reserved4; +}; + struct stats { __be32 stat_name; __be32 queue_id; @@ -451,7 +476,7 @@ struct gve_ptype_entry { }; struct gve_ptype_map { - struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */ + struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */ }; struct gve_adminq_get_ptype_map { @@ -585,6 +610,7 @@ union gve_adminq_command { struct gve_adminq_query_flow_rules query_flow_rules; struct gve_adminq_configure_rss configure_rss; struct gve_adminq_query_rss query_rss; + struct gve_adminq_report_nic_ts report_nic_ts; struct gve_adminq_extended_command extended_command; }; }; @@ -624,6 +650,8 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv); int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc); int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); +int gve_adminq_report_nic_ts(struct gve_priv *priv, + dma_addr_t nic_ts_report_addr); struct gve_ptype_lut; int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h index f79cd0591110..d17da841b5a0 100644 --- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h +++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h @@ -247,7 +247,8 @@ struct gve_rx_compl_desc_dqo { }; __le32 hash; __le32 reserved6; - __le64 reserved7; + __le32 reserved7; + __le32 ts; /* timestamp in nanosecs */ } __packed; static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 3c1da0cf3f61..d0a223250845 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt", "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt", - "adminq_query_rss_cnt", + "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt", }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -456,6 +456,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_cfg_flow_rule_cnt; data[i++] = priv->adminq_cfg_rss_cnt; data[i++] = priv->adminq_query_rss_cnt; + data[i++] = priv->adminq_report_nic_timestamp_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -667,7 +668,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev) struct gve_priv *priv = netdev_priv(netdev); u32 ret_flags = 0; - /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */ + /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. 
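(Editor's aside.) The gve_rx_compl_desc_dqo change above is deliberately size-neutral: the 8-byte reserved7 field is split into a 4-byte reserved field plus a 4-byte hardware timestamp, so the completion descriptor stays exactly 32 bytes and the existing static_assert keeps enforcing that at compile time. A standalone sketch of the same size-preserving trick, with field names invented for illustration:

#include <assert.h>
#include <stdint.h>

struct compl_desc_old {
	uint8_t  hdr[16];	/* stands in for the first 16 bytes */
	uint32_t hash;
	uint32_t reserved6;
	uint64_t reserved7;
} __attribute__((packed));

struct compl_desc_new {
	uint8_t  hdr[16];
	uint32_t hash;
	uint32_t reserved6;
	uint32_t reserved7;	/* low half of the old reserved field */
	uint32_t ts;		/* low 32 bits of the NIC ns counter */
} __attribute__((packed));

static_assert(sizeof(struct compl_desc_old) == sizeof(struct compl_desc_new),
	      "repurposing reserved bits must not change the wire layout");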
*/ if (priv->ethtool_flags & BIT(0)) ret_flags |= BIT(0); return ret_flags; @@ -798,9 +799,6 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLDEL: err = gve_del_flow_rule(priv, cmd); break; - case ETHTOOL_SRXFH: - err = -EOPNOTSUPP; - break; default: err = -EOPNOTSUPP; break; @@ -835,9 +833,6 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u case ETHTOOL_GRXCLSRLALL: err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs); break; - case ETHTOOL_GRXFH: - err = -EOPNOTSUPP; - break; default: err = -EOPNOTSUPP; break; @@ -928,6 +923,27 @@ static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx return 0; } +static int gve_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct gve_priv *priv = netdev_priv(netdev); + + ethtool_op_get_ts_info(netdev, info); + + if (priv->nic_timestamp_supported) { + info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + if (priv->ptp) + info->phc_index = ptp_clock_index(priv->ptp->clock); + } + + return 0; +} + const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, @@ -956,5 +972,5 @@ const struct ethtool_ops gve_ethtool_ops = { .get_priv_flags = gve_get_priv_flags, .set_priv_flags = gve_set_priv_flags, .get_link_ksettings = gve_get_link_ksettings, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = gve_get_ts_info, }; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index dc35a23ec47f..28e4795f5f40 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -619,9 +619,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) err = gve_alloc_counter_array(priv); if (err) goto abort_with_rss_config_cache; - err = gve_alloc_notify_blocks(priv); + err = gve_init_clock(priv); if (err) goto abort_with_counter; + err = gve_alloc_notify_blocks(priv); + if (err) + goto abort_with_clock; err = gve_alloc_stats_report(priv); if (err) goto abort_with_ntfy_blocks; @@ -674,6 +677,8 @@ abort_with_stats_report: gve_free_stats_report(priv); abort_with_ntfy_blocks: gve_free_notify_blocks(priv); +abort_with_clock: + gve_teardown_clock(priv); abort_with_counter: gve_free_counter_array(priv); abort_with_rss_config_cache: @@ -722,6 +727,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); + gve_teardown_clock(priv); gve_clear_device_resources_ok(priv); } @@ -1727,7 +1733,7 @@ int gve_adjust_config(struct gve_priv *priv, { int err; - /* Allocate resources for the new confiugration */ + /* Allocate resources for the new configuration */ err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg); if (err) { netif_err(priv, drv, priv->dev, @@ -2042,6 +2048,46 @@ revert_features: return err; } +static int gve_get_ts_config(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_config) +{ + struct gve_priv *priv = netdev_priv(dev); + + *kernel_config = priv->ts_config; + return 0; +} + +static int gve_set_ts_config(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_config, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(dev); + + if (kernel_config->tx_type != 
HWTSTAMP_TX_OFF) { + NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported"); + return -ERANGE; + } + + if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) { + if (!priv->nic_ts_report) { + NL_SET_ERR_MSG_MOD(extack, + "RX timestamping is not supported"); + kernel_config->rx_filter = HWTSTAMP_FILTER_NONE; + return -EOPNOTSUPP; + } + + kernel_config->rx_filter = HWTSTAMP_FILTER_ALL; + gve_clock_nic_ts_read(priv); + ptp_schedule_worker(priv->ptp->clock, 0); + } else { + ptp_cancel_worker_sync(priv->ptp->clock); + } + + priv->ts_config.rx_filter = kernel_config->rx_filter; + + return 0; +} + static const struct net_device_ops gve_netdev_ops = { .ndo_start_xmit = gve_start_xmit, .ndo_features_check = gve_features_check, @@ -2053,6 +2099,8 @@ static const struct net_device_ops gve_netdev_ops = { .ndo_bpf = gve_xdp, .ndo_xdp_xmit = gve_xdp_xmit, .ndo_xsk_wakeup = gve_xsk_wakeup, + .ndo_hwtstamp_get = gve_get_ts_config, + .ndo_hwtstamp_set = gve_set_ts_config, }; static void gve_handle_status(struct gve_priv *priv, u32 status) @@ -2236,7 +2284,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) goto err; } - /* Big TCP is only supported on DQ*/ + /* Big TCP is only supported on DQO */ if (!gve_is_gqi(priv)) netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); @@ -2272,6 +2320,9 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; } + priv->ts_config.tx_type = HWTSTAMP_TX_OFF; + priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE; + setup_device: gve_set_netdev_xdp_features(priv); err = gve_setup_device_resources(priv); diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c new file mode 100644 index 000000000000..e96247c9d68d --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_ptp.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2025 Google LLC + */ + +#include "gve.h" +#include "gve_adminq.h" + +/* Interval to schedule a nic timestamp calibration, 250ms. */ +#define GVE_NIC_TS_SYNC_INTERVAL_MS 250 + +/* Read the nic timestamp from hardware via the admin queue. 
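 *
 * (Editor's note, added for context.) This read is the calibration half
 * of the RX timestamping scheme: gve_ptp_do_aux_work() below re-issues
 * it roughly every 250 ms (GVE_NIC_TS_SYNC_INTERVAL_MS) through the PTP
 * aux-work machinery, and the cached 64-bit value stored in
 * last_sync_nic_counter is what gve_rx_skb_hwtstamp() later combines
 * with the 32-bit per-packet timestamp from the completion descriptor.
 * gve_set_ts_config() above kicks the worker with ptp_schedule_worker()
 * when HWTSTAMP_FILTER_ALL is enabled and cancels it when filtering is
 * turned off.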
*/ +int gve_clock_nic_ts_read(struct gve_priv *priv) +{ + u64 nic_raw; + int err; + + err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus); + if (err) + return err; + + nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp); + WRITE_ONCE(priv->last_sync_nic_counter, nic_raw); + + return 0; +} + +static long gve_ptp_do_aux_work(struct ptp_clock_info *info) +{ + const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info); + struct gve_priv *priv = ptp->priv; + int err; + + if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv)) + goto out; + + err = gve_clock_nic_ts_read(priv); + if (err && net_ratelimit()) + dev_err(&priv->pdev->dev, + "%s read err %d\n", __func__, err); + +out: + return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS); +} + +static const struct ptp_clock_info gve_ptp_caps = { + .owner = THIS_MODULE, + .name = "gve clock", + .do_aux_work = gve_ptp_do_aux_work, +}; + +static int gve_ptp_init(struct gve_priv *priv) +{ + struct gve_ptp *ptp; + int err; + + if (!priv->nic_timestamp_supported) { + dev_dbg(&priv->pdev->dev, "Device does not support PTP\n"); + return -EOPNOTSUPP; + } + + priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL); + if (!priv->ptp) + return -ENOMEM; + + ptp = priv->ptp; + ptp->info = gve_ptp_caps; + ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev); + + if (IS_ERR(ptp->clock)) { + dev_err(&priv->pdev->dev, "PTP clock registration failed\n"); + err = PTR_ERR(ptp->clock); + goto free_ptp; + } + + ptp->priv = priv; + return 0; + +free_ptp: + kfree(ptp); + priv->ptp = NULL; + return err; +} + +static void gve_ptp_release(struct gve_priv *priv) +{ + struct gve_ptp *ptp = priv->ptp; + + if (!ptp) + return; + + if (ptp->clock) + ptp_clock_unregister(ptp->clock); + + kfree(ptp); + priv->ptp = NULL; +} + +int gve_init_clock(struct gve_priv *priv) +{ + int err; + + if (!priv->nic_timestamp_supported) + return 0; + + err = gve_ptp_init(priv); + if (err) + return err; + + priv->nic_ts_report = + dma_alloc_coherent(&priv->pdev->dev, + sizeof(struct gve_nic_ts_report), + &priv->nic_ts_report_bus, + GFP_KERNEL); + if (!priv->nic_ts_report) { + dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__); + err = -ENOMEM; + goto release_ptp; + } + + return 0; + +release_ptp: + gve_ptp_release(priv); + return err; +} + +void gve_teardown_clock(struct gve_priv *priv) +{ + gve_ptp_release(priv); + + if (priv->nic_ts_report) { + dma_free_coherent(&priv->pdev->dev, + sizeof(struct gve_nic_ts_report), + priv->nic_ts_report, priv->nic_ts_report_bus); + priv->nic_ts_report = NULL; + } +} diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index dcb0545baa50..0be41a0cdd15 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -437,6 +437,29 @@ static void gve_rx_skb_hash(struct sk_buff *skb, skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); } +/* Expand the hardware timestamp to the full 64 bits of width, and add it to the + * skb. + * + * This algorithm works by using the passed hardware timestamp to generate a + * diff relative to the last read of the nic clock. This diff can be positive or + * negative, as it is possible that we have read the clock more recently than + * the hardware has received this packet. To detect this, we use the high bit of + * the diff, and assume that the read is more recent if the high bit is set. In + * this case we invert the process. 
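 *
 * (Editor's worked example, not part of the patch.) Suppose the last
 * full clock read was last_read = 0x00000002FFFFFFF0 ns and a packet
 * arrives carrying hwts = 0x00000010. Then low = (u32)last_read =
 * 0xFFFFFFF0 and diff = (s32)(hwts - low) = +0x20, so the expanded
 * stamp is last_read + diff = 0x0000000300000010: the 32-bit wrap is
 * absorbed by the signed subtraction. Had hwts instead been slightly
 * older than low, diff would have come out negative and the sum would
 * step backwards across the same boundary.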
+ * + * Note that this means if the time delta between packet reception and the last + * clock read is greater than ~2 seconds, this will provide invalid results. + */ +static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts) +{ + u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter); + struct sk_buff *skb = rx->ctx.skb_head; + u32 low = (u32)last_read; + s32 diff = hwts - low; + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff); +} + static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) { if (!rx->ctx.skb_head) @@ -767,6 +790,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, if (feat & NETIF_F_RXCSUM) gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); + if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL) + gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts)); + /* RSC packets must set gso_size otherwise the TCP stack will complain * that packets are larger than MTU. */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b03b8758c777..5c8c62ea6ac0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5961,8 +5961,8 @@ static int __init hns3_init_module(void) { int ret; - pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); - pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright); client.type = HNAE3_CLIENT_KNIC; snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a7de67699a01..a5b480d59fbf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -12904,7 +12904,7 @@ static struct hnae3_ae_algo ae_algo = { static int __init hclge_init(void) { - pr_info("%s is initializing\n", HCLGE_NAME); + pr_debug("%s is initializing\n", HCLGE_NAME); hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); if (!hclge_wq) { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c index ae08257dd1d2..3f7f73430be4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -482,7 +482,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, { struct hinic3_sq_wqe_combo wqe_combo = {}; struct hinic3_tx_info *tx_info; - struct hinic3_txq *tx_q = txq; u32 offload, queue_info = 0; struct hinic3_sq_task task; u16 wqebb_cnt, num_sge; @@ -506,9 +505,9 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, if (likely(wqebb_cnt > txq->tx_stop_thrs)) txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs); - netif_subqueue_try_stop(netdev, tx_q->sq->q_id, - hinic3_wq_free_wqebbs(&tx_q->sq->wq), - tx_q->tx_start_thrs); + netif_subqueue_try_stop(netdev, txq->sq->q_id, + hinic3_wq_free_wqebbs(&txq->sq->wq), + txq->tx_start_thrs); return NETDEV_TX_BUSY; } @@ -542,12 +541,11 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, goto err_drop_pkt; } - netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id), - skb->len); - netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id, - hinic3_wq_free_wqebbs(&tx_q->sq->wq), - tx_q->tx_stop_thrs, - tx_q->tx_start_thrs); + netif_subqueue_sent(netdev, txq->sq->q_id, skb->len); + 
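	/* (Editor's aside.) netif_subqueue_sent() above replaces the
	 * open-coded netdev_get_tx_queue() + netdev_tx_sent_queue() pair:
	 * same BQL byte accounting, but keyed by queue id like the other
	 * netif_subqueue_*() helpers this driver already uses. The usual
	 * pairing is netif_subqueue_sent() + netif_subqueue_maybe_stop()
	 * on the transmit path and netif_subqueue_completed_wake() on the
	 * completion path, so stop/wake thresholds and byte counts stay
	 * consistent. The hunk also drops the redundant tx_q alias of txq.
	 */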
netif_subqueue_maybe_stop(netdev, txq->sq->q_id, + hinic3_wq_free_wqebbs(&txq->sq->wq), + txq->tx_stop_thrs, + txq->tx_start_thrs); hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ, @@ -631,7 +629,6 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) struct net_device *netdev = txq->netdev; u16 hw_ci, sw_ci, q_id = txq->sq->q_id; struct hinic3_tx_info *tx_info; - struct hinic3_txq *tx_q = txq; unsigned int bytes_compl = 0; unsigned int pkts = 0; u16 wqebb_cnt = 0; @@ -663,8 +660,8 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt); netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl, - hinic3_wq_free_wqebbs(&tx_q->sq->wq), - tx_q->tx_start_thrs); + hinic3_wq_free_wqebbs(&txq->sq->wq), + txq->tx_start_thrs); return pkts == HINIC3_TX_POLL_WEIGHT; } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 9364bc2b4eb1..c0bbb12eed2e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -2096,54 +2096,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev, } } -static int e1000_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *info, - u32 __always_unused *rule_locs) +static int e1000_get_rxfh_fields(struct net_device *netdev, + struct ethtool_rxfh_fields *info) { - info->data = 0; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 mrqc; - switch (info->cmd) { - case ETHTOOL_GRXFH: { - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 mrqc; + info->data = 0; - mrqc = er32(MRQC); + mrqc = er32(MRQC); - if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) - return 0; - - switch (info->flow_type) { - case TCP_V4_FLOW: - if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) - info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - fallthrough; - case UDP_V4_FLOW: - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case IPV4_FLOW: - if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) - info->data |= RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) - info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - fallthrough; - case UDP_V6_FLOW: - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case IPV6_FLOW: - if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) - info->data |= RXH_IP_SRC | RXH_IP_DST; - break; - default: - break; - } + if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) return 0; - } + + switch (info->flow_type) { + case TCP_V4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; default: - return -EOPNOTSUPP; + break; } + return 0; } static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) @@ -2352,7 +2345,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_sset_count = e1000e_get_sset_count, .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, - .get_rxnfc = e1000_get_rxnfc, + .get_rxfh_fields = 
e1000_get_rxfh_fields, .get_ts_info = e1000e_get_ts_info, .get_eee = e1000e_get_eee, .set_eee = e1000e_set_eee, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a96f4cfa6e17..7719e15813ee 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ @@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) adapter->cc.shift = shift; } break; + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + /* System firmware can misreport this value, so set it to a + * stable 38400KHz frequency. + */ + incperiod = INCPERIOD_38400KHZ; + incvalue = INCVALUE_38400KHZ; + shift = INCVALUE_SHIFT_38400KHZ; + adapter->cc.shift = shift; + break; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 89d57dd911dc..ea3c3eb2ef20 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; else adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; break; + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; + break; case e1000_82574: case e1000_82583: adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 1bc5b6c0b897..1954a04460d1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev, return 0; } -static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, - struct ethtool_rxnfc *cmd) +static int fm10k_get_rssh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct fm10k_intfc *interface = netdev_priv(dev); + cmd->data = 0; /* Report default options for RSS on fm10k */ @@ -743,9 +745,6 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, cmd->data = interface->num_rx_queues; ret = 0; break; - case ETHTOOL_GRXFH: - ret = fm10k_get_rss_hash_opts(interface, cmd); - break; default: break; } @@ -753,9 +752,11 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, - struct ethtool_rxnfc *nfc) +static int fm10k_set_rssh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct fm10k_intfc *interface = netdev_priv(dev); int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags); int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, @@ -871,22 +872,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, return 0; } -static int 
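(Editor's aside.) The e1000e and fm10k hunks above are two instances of a tree-wide conversion: RSS hash-field configuration, which drivers used to multiplex through .get_rxnfc/.set_rxnfc with the ETHTOOL_GRXFH/ETHTOOL_SRXFH sub-commands, moves to dedicated ethtool_ops callbacks, leaving the rxnfc handlers with only the classification-rule cases. A minimal sketch of the new callback shape for a made-up driver "foo":

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *info)
{
	/* Report which header fields feed the RSS hash for this flow
	 * type by OR-ing RXH_* bits into info->data.
	 */
	info->data = 0;
	if (info->flow_type == TCP_V4_FLOW)
		info->data = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *info,
			       struct netlink_ext_ack *extack)
{
	/* Validate info->data and program the hardware hash inputs;
	 * this sketch accepts only its one advertised flow type.
	 */
	if (info->flow_type != TCP_V4_FLOW)
		return -EOPNOTSUPP;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh_fields = foo_get_rxfh_fields,
	.set_rxfh_fields = foo_set_rxfh_fields,
};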
fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct fm10k_intfc *interface = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = fm10k_set_rss_hash_opt(interface, cmd); - break; - default: - break; - } - - return ret; -} - static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) { struct fm10k_hw *hw = &interface->hw; @@ -1176,7 +1161,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .get_coalesce = fm10k_get_coalesce, .set_coalesce = fm10k_set_coalesce, .get_rxnfc = fm10k_get_rxnfc, - .set_rxnfc = fm10k_set_rxnfc, .get_regs = fm10k_get_regs, .get_regs_len = fm10k_get_regs_len, .self_test = fm10k_self_test, @@ -1186,6 +1170,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .get_rxfh_key_size = fm10k_get_rssrk_size, .get_rxfh = fm10k_get_rssh, .set_rxfh = fm10k_set_rssh, + .get_rxfh_fields = fm10k_get_rssh_fields, + .set_rxfh_fields = fm10k_set_rssh_fields, .get_channels = fm10k_get_channels, .set_channels = fm10k_set_channels, .get_ts_info = ethtool_op_get_ts_info, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index c67963bfe14e..54d5fdc303ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -548,6 +548,7 @@ struct i40e_pf { u16 empr_count; /* EMP reset count */ u16 pfr_count; /* PF reset count */ u16 sw_int_count; /* SW interrupt count */ + u32 link_down_events; struct mutex switch_mutex; u16 lan_vsi; /* our default LAN VSI */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 8a7a83f83ee5..2ff17d50135c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -3,6 +3,7 @@ /* ethtool support for i40e */ +#include <linux/net/intel/libie/pctype.h> #include "i40e_devids.h" #include "i40e_diag.h" #include "i40e_txrx_common.h" @@ -2749,6 +2750,15 @@ skip_ol_tests: netif_info(pf, drv, netdev, "testing failed\n"); } +static void i40e_get_link_ext_stats(struct net_device *netdev, + struct ethtool_link_ext_stats *stats) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + stats->link_down_events = pf->link_down_events; +} + static void i40e_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { @@ -3129,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, return __i40e_set_coalesce(netdev, ec, queue); } -/** - * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type - * @pf: pointer to the physical function struct - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow is supported, else Invalid Input. 
- **/ -static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) +static int i40e_get_rxfh_fields(struct net_device *netdev, + struct ethtool_rxfh_fields *cmd) { + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u8 flow_pctype = 0; u64 i_set = 0; @@ -3146,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) switch (cmd->flow_type) { case TCP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP; break; case UDP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP; break; case TCP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP; break; case UDP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: @@ -3412,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, switch (rule->flow_type) { case SCTP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP; break; case TCP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP; break; case UDP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP; break; case SCTP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP; break; case TCP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP; break; case UDP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP; break; case IP_USER_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER; break; case IPV6_USER_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER; break; default: /* If we have stored a filter with a flow type not listed here @@ -3535,9 +3542,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, cmd->data = vsi->rss_size; ret = 0; break; - case ETHTOOL_GRXFH: - ret = i40e_get_rss_hash_opts(pf, cmd); - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = pf->fdir_pf_active_filters; /* report total rule count */ @@ -3566,7 +3570,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, * Returns value of bits to be set per user request **/ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, - struct ethtool_rxnfc *nfc, + const struct ethtool_rxfh_fields *nfc, u64 i_setc) { u64 i_set = i_setc; @@ -3611,15 +3615,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, } #define FLOW_PCTYPES_SIZE 64 -/** - * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash - * @pf: pointer to the physical function struct - * @nfc: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. 
- **/ -static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) +static int i40e_set_rxfh_fields(struct net_device *netdev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); @@ -3643,40 +3645,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: - set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) - set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, flow_pctypes); break; case TCP_V6_FLOW: - set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) - set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, flow_pctypes); break; case UDP_V4_FLOW: - set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { - set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, + set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, flow_pctypes); - set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, + set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, flow_pctypes); } - hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); + set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { - set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, + set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, flow_pctypes); - set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, + set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, flow_pctypes); } - hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -3685,7 +3687,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -3694,15 +3696,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4); break; case IPV6_FLOW: - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6); break; default: return -EINVAL; @@ -4312,36 
+4314,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, switch (fsp->flow_type & ~FLOW_EXT) { case SCTP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP; fdir_filter_count = &pf->fd_sctp4_filter_cnt; break; case TCP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP; fdir_filter_count = &pf->fd_tcp4_filter_cnt; break; case UDP_V4_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP; fdir_filter_count = &pf->fd_udp4_filter_cnt; break; case SCTP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP; fdir_filter_count = &pf->fd_sctp6_filter_cnt; break; case TCP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP; fdir_filter_count = &pf->fd_tcp6_filter_cnt; break; case UDP_V6_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP; fdir_filter_count = &pf->fd_udp6_filter_cnt; break; case IP_USER_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER; fdir_filter_count = &pf->fd_ip4_filter_cnt; flex_l3 = true; break; case IPV6_USER_FLOW: - index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; + index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER; fdir_filter_count = &pf->fd_ip6_filter_cnt; flex_l3 = true; break; @@ -4677,8 +4679,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, * separate support, we'll always assume and enforce that the two flow * types must have matching input sets. */ - if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4, new_mask); /* Add the new offset and update table, if necessary */ @@ -4954,13 +4956,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = i40e_set_rss_hash_opt(pf, cmd); - break; case ETHTOOL_SRXCLSRLINS: ret = i40e_add_fdir_ethtool(vsi, cmd); break; @@ -5809,6 +5807,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_regs = i40e_get_regs, .nway_reset = i40e_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ext_stats = i40e_get_link_ext_stats, .get_wol = i40e_get_wol, .set_wol = i40e_set_wol, .set_eeprom = i40e_set_eeprom, @@ -5835,6 +5834,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_rxfh_indir_size = i40e_get_rxfh_indir_size, .get_rxfh = i40e_get_rxfh, .set_rxfh = i40e_set_rxfh, + .get_rxfh_fields = i40e_get_rxfh_fields, + .set_rxfh_fields = i40e_set_rxfh_fields, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_module_info = i40e_get_module_info, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f1c9e575703e..3b4f59d978a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3,6 +3,7 @@ #include <generated/utsrelease.h> #include <linux/crash_dump.h> +#include <linux/net/intel/libie/pctype.h> #include <linux/if_bridge.h> #include <linux/if_macvlan.h> #include <linux/module.h> @@ -9188,47 +9189,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) i40e_reset_fdir_filter_cnt(pf); 
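	/* (Editor's note.) The I40E_FILTER_PCTYPE_* to LIBIE_FILTER_PCTYPE_*
	 * substitutions across these i40e files are mechanical: the packet
	 * classifier type enum moves out of i40e_type.h (it is deleted
	 * further down) into the shared <linux/net/intel/libie/pctype.h>
	 * header so i40e, iavf and newer Intel drivers can share a single
	 * definition; only the spelling of the constants changes.
	 */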
/* Reprogram the default input set for TCP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for TCP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for Other/IPv4 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); /* Reprogram the default input set for Other/IPv6 */ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } @@ -9656,7 +9657,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) * settings. It is safe to restore the default input set * because there are no active TCPv4 filter rules. 
*/ - i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); @@ -9959,6 +9960,9 @@ static void i40e_link_event(struct i40e_pf *pf) new_link == netif_carrier_ok(vsi->netdev))) return; + if (!new_link && old_link) + pf->link_down_events++; + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to @@ -12507,7 +12511,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); - hena |= i40e_pf_get_default_rss_hena(pf); + hena |= i40e_pf_get_default_rss_hashcfg(pf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); @@ -15891,7 +15895,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; - pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c006f716a3bd..048c33039130 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/pctype.h> #include <linux/net/intel/libie/rx.h> #include <linux/prefetch.h> #include <linux/sctp.h> @@ -397,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi, ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_UDPIP_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP); + LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_UDPIP6_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP); + LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP); if (ret) { kfree(raw_packet); @@ -444,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi, ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_TCPIP_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_TCPIP6_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP); if (ret) { kfree(raw_packet); @@ -499,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi, ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_SCTPIP_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP); + LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_SCTPIP6_DUMMY_PACKET_LEN, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP); + LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP); if (ret) { kfree(raw_packet); @@ -543,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi, int i; if (ipv4) { - iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4; + iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER; + iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4; } else { - iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; - 
iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6; + iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER; + iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6; } for (i = iter_start; i <= iter_end; i++) { @@ -2948,9 +2949,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, tx_ring->queue_index); flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? - (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << + (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : - (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << + (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 7c26c9a2bf65..1e5fd63d47f4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -4,6 +4,7 @@ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ +#include <linux/net/intel/libie/pctype.h> #include <net/xdp.h> #include "i40e_type.h" @@ -71,30 +72,30 @@ enum i40e_dyn_idx { #define I40E_SW_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ -#define I40E_DEFAULT_RSS_HENA ( \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) - -#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) - -#define i40e_pf_get_default_rss_hena(pf) \ +#define I40E_DEFAULT_RSS_HASHCFG ( \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hashcfg(pf) \ (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? 
\ - I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG) /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 28568e126850..a09ed83835ff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -929,38 +929,6 @@ struct i40e_filter_program_desc { #define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) -/* Packet Classifier Types for filters */ -enum i40e_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - I40E_FILTER_PCTYPE_FCOE_OX = 48, - I40E_FILTER_PCTYPE_FCOE_RX = 49, - I40E_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - enum i40e_filter_program_desc_dest { I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 88e6bef69342..b232edf68ab1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -812,7 +812,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) } if (!idx) { - u64 hena = i40e_pf_get_default_rss_hena(pf); + u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf); u8 broadcast[ETH_ALEN]; vf->lan_vsi_idx = vsi->idx; @@ -841,8 +841,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); spin_unlock_bh(&vsi->mac_filter_hash_lock); - wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); - wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); + wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg); + wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), + (u32)(hashcfg >> 32)); /* program mac filter only for VF VSI */ ret = i40e_sync_vsi_filters(vsi); if (ret) @@ -3447,15 +3448,15 @@ err: } /** - * i40e_vc_get_rss_hena + * i40e_vc_get_rss_hashcfg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * Return the RSS HENA bits allowed by the hardware + * Return the RSS Hash configuration bits allowed by the hardware **/ -static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) +static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg) { - struct virtchnl_rss_hena *vrh = NULL; + struct virtchnl_rss_hashcfg *vrh = NULL; struct 
i40e_pf *pf = vf->pf; int aq_ret = 0; int len = 0; @@ -3464,7 +3465,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) aq_ret = -EINVAL; goto err; } - len = sizeof(struct virtchnl_rss_hena); + len = sizeof(struct virtchnl_rss_hashcfg); vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { @@ -3472,26 +3473,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) len = 0; goto err; } - vrh->hena = i40e_pf_get_default_rss_hena(pf); + vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf); err: /* send the response back to the VF */ - aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, + aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, aq_ret, (u8 *)vrh, len); kfree(vrh); return aq_ret; } /** - * i40e_vc_set_rss_hena + * i40e_vc_set_rss_hashcfg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * Set the RSS HENA bits for the VF + * Set the RSS Hash configuration bits for the VF **/ -static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) +static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg) { - struct virtchnl_rss_hena *vrh = - (struct virtchnl_rss_hena *)msg; + struct virtchnl_rss_hashcfg *vrh = + (struct virtchnl_rss_hashcfg *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; int aq_ret = 0; @@ -3500,13 +3501,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) aq_ret = -EINVAL; goto err; } - i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), + (u32)vrh->hashcfg); i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), - (u32)(vrh->hena >> 32)); + (u32)(vrh->hashcfg >> 32)); /* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret); } /** @@ -4253,11 +4255,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_CONFIG_RSS_LUT: ret = i40e_vc_config_rss_lut(vf, msg); break; - case VIRTCHNL_OP_GET_RSS_HENA_CAPS: - ret = i40e_vc_get_rss_hena(vf, msg); + case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: + ret = i40e_vc_get_rss_hashcfg(vf, msg); break; - case VIRTCHNL_OP_SET_RSS_HENA: - ret = i40e_vc_set_rss_hena(vf, msg); + case VIRTCHNL_OP_SET_RSS_HASHCFG: + ret = i40e_vc_set_rss_hashcfg(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: ret = i40e_vc_enable_vlan_stripping(vf, msg); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index f7a98ff43a57..a87e0c6d4017 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -114,8 +114,6 @@ struct iavf_q_vector { u16 reg_idx; /* register index of the interrupt */ char name[IFNAMSIZ + 15]; bool arm_wb_state; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; }; /* Helper macros to switch between ints/sec and what the register uses. @@ -315,8 +313,8 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ #define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) /* Newer style, RSS done by the PF so we can ignore hardware vagaries. 
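 *
 * (Editor's note.) "HENA" (hash enable, after the VFQF_HENA registers)
 * is renamed to "hashcfg" across this series: the AQ request flags
 * below, the adapter field (hena becomes rss_hashcfg), the
 * IAVF_DEFAULT_RSS_HENA* macros and the virtchnl messages
 * (VIRTCHNL_OP_{GET,SET}_RSS_HASHCFG*). It reads as a pure rename;
 * presumably the virtchnl opcode values and the register layout are
 * unchanged.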
*/ -#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11) -#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11) +#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12) #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) #define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15) @@ -456,7 +454,7 @@ struct iavf_adapter { u32 aq_wait_count; /* RSS stuff */ enum virtchnl_rss_algorithm hfunc; - u64 hena; + u64 rss_hashcfg; u16 rss_key_size; u16 rss_lut_size; u8 *rss_key; @@ -600,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter); bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter); void iavf_request_stats(struct iavf_adapter *adapter); int iavf_request_reset(struct iavf_adapter *adapter); -void iavf_get_hena(struct iavf_adapter *adapter); -void iavf_set_hena(struct iavf_adapter *adapter); +void iavf_get_rss_hashcfg(struct iavf_adapter *adapter); +void iavf_set_rss_hashcfg(struct iavf_adapter *adapter); void iavf_set_rss_key(struct iavf_adapter *adapter); void iavf_set_rss_lut(struct iavf_adapter *adapter); void iavf_set_rss_hfunc(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 2b2b315205b5..05d72be3fe80 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1307,14 +1307,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx return iavf_fdir_del_fltr(adapter, false, fsp->location); } -/** - * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input - * @cmd: ethtool rxnfc command - * - * This function parses the rxnfc command and returns intended - * header types for RSS configuration - */ -static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) +static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd) { u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE; @@ -1350,15 +1343,8 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) return hdrs; } -/** - * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input - * @cmd: ethtool rxnfc command - * @symm: true if Symmetric Topelitz is set - * - * This function parses the rxnfc command and returns intended hash fields for - * RSS configuration - */ -static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm) +static u64 +iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) { u64 hfld = IAVF_ADV_RSS_HASH_INVALID; @@ -1416,17 +1402,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm) return hfld; } -/** - * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash - * @adapter: pointer to the VF adapter structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. 
- */ static int -iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, - struct ethtool_rxnfc *cmd) +iavf_set_rxfh_fields(struct net_device *netdev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) { + struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adv_rss *rss_old, *rss_new; bool rss_new_add = false; bool symm = false; @@ -1493,17 +1474,10 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, return err; } -/** - * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type - * @adapter: pointer to the VF adapter structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. - */ static int -iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter, - struct ethtool_rxnfc *cmd) +iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd) { + struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adv_rss *rss; u64 hash_flds; u32 hdrs; @@ -1568,9 +1542,6 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLDEL: ret = iavf_del_fdir_ethtool(adapter, cmd); break; - case ETHTOOL_SRXFH: - ret = iavf_set_adv_rss_hash_opt(adapter, cmd); - break; default: break; } @@ -1612,9 +1583,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); break; - case ETHTOOL_GRXFH: - ret = iavf_get_adv_rss_hash_opt(adapter, cmd); - break; default: break; } @@ -1812,6 +1780,8 @@ static const struct ethtool_ops iavf_ethtool_ops = { .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, .set_rxfh = iavf_set_rxfh, + .get_rxfh_fields = iavf_get_rxfh_fields, + .set_rxfh_fields = iavf_set_rxfh_fields, .get_channels = iavf_get_channels, .set_channels = iavf_set_channels, .get_rxfh_key_size = iavf_get_rxfh_key_size, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 81d7249d1149..c859a096de9f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -528,33 +528,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) } /** - * iavf_irq_affinity_notify - Callback for affinity changes - * @notify: context as to what irq was changed - * @mask: the new affinity mask - * - * This is a callback function used by the irq_set_affinity_notifier function - * so that we may register to receive changes to the irq affinity masks. - **/ -static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct iavf_q_vector *q_vector = - container_of(notify, struct iavf_q_vector, affinity_notify); - - cpumask_copy(&q_vector->affinity_mask, mask); -} - -/** - * iavf_irq_affinity_release - Callback for affinity notifier release - * @ref: internal core kernel usage - * - * This is a callback function used by the irq_set_affinity_notifier function - * to inform the current notification subscriber that they will no longer - * receive notifications. 
- **/ -static void iavf_irq_affinity_release(struct kref *ref) {} - -/** * iavf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure * @basename: device basename @@ -568,7 +541,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; - int cpu; iavf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -603,17 +575,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) "Request_irq failed, error: %d\n", err); goto free_queue_irqs; } - /* register for affinity change notifications */ - q_vector->affinity_notify.notify = iavf_irq_affinity_notify; - q_vector->affinity_notify.release = - iavf_irq_affinity_release; - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* Spread the IRQ affinity hints across online CPUs. Note that - * get_cpu_mask returns a mask with a permanent lifetime so - * it's safe to use as a hint for irq_update_affinity_hint. - */ - cpu = cpumask_local_spread(q_vector->v_idx, -1); - irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; @@ -622,8 +583,6 @@ free_queue_irqs: while (vector) { vector--; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; - irq_set_affinity_notifier(irq_num, NULL); - irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } return err; @@ -665,6 +624,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter) **/ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) { + struct iavf_q_vector *q_vector; int vector, irq_num, q_vectors; if (!adapter->msix_entries) @@ -673,10 +633,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { + q_vector = &adapter->q_vectors[vector]; + netif_napi_set_irq_locked(&q_vector->napi, -1); irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; - irq_set_affinity_notifier(irq_num, NULL); - irq_update_affinity_hint(irq_num, NULL); - free_irq(irq_num, &adapter->q_vectors[vector]); + free_irq(irq_num, q_vector); } } @@ -1823,12 +1783,13 @@ static int iavf_init_rss(struct iavf_adapter *adapter) /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; + adapter->rss_hashcfg = + IAVF_DEFAULT_RSS_HASHCFG_EXPANDED; else - adapter->hena = IAVF_DEFAULT_RSS_HENA; + adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG; - wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg); + wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32)); } iavf_fill_rss_lut(adapter); @@ -1846,7 +1807,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter) **/ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) { - int q_idx = 0, num_q_vectors; + int q_idx = 0, num_q_vectors, irq_num; struct iavf_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; @@ -1856,14 +1817,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) return -ENOMEM; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector; q_vector = &adapter->q_vectors[q_idx]; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; q_vector->reg_idx = 
q_idx; - cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add_locked(adapter->netdev, &q_vector->napi, - iavf_napi_poll); + netif_napi_add_config_locked(adapter->netdev, &q_vector->napi, + iavf_napi_poll, q_idx); + netif_napi_set_irq_locked(&q_vector->napi, irq_num); } return 0; @@ -2195,12 +2157,12 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { - iavf_get_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) { + iavf_get_rss_hashcfg(adapter); return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { - iavf_set_hena(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) { + iavf_set_rss_hashcfg(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { @@ -5387,6 +5349,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_alloc_etherdev; } + netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 422312b8b54a..aaf70c625655 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -723,7 +723,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; - page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); + libeth_rx_recycle_slow(rx_fqes->netmem); if (unlikely(++i == rx_ring->count)) i = 0; @@ -1197,10 +1197,11 @@ static void iavf_add_rx_frag(struct sk_buff *skb, const struct libeth_fqe *rx_buffer, unsigned int size) { - u32 hr = rx_buffer->page->pp->p.offset; + u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->offset + hr, size, rx_buffer->truesize); + skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, + rx_buffer->netmem, rx_buffer->offset + hr, + size, rx_buffer->truesize); } /** @@ -1214,12 +1215,13 @@ static void iavf_add_rx_frag(struct sk_buff *skb, static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, unsigned int size) { - u32 hr = rx_buffer->page->pp->p.offset; + struct page *buf_page = __netmem_to_page(rx_buffer->netmem); + u32 hr = buf_page->pp->p.offset; struct sk_buff *skb; void *va; /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->offset; + va = page_address(buf_page) + rx_buffer->offset; net_prefetch(va + hr); /* build an skb around the page buffer */ @@ -1648,7 +1650,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. 
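 *
 * Note: with this series the mask checked here comes from the core's
 * per-NAPI config (netif_napi_add_config_locked() plus
 * netif_set_affinity_auto() in iavf_probe()), replacing the
 * driver-private affinity_mask and IRQ affinity notifier removed above.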
*/ - if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + if (!cpumask_test_cpu(cpu_id, + &q_vector->napi.config->affinity_mask)) { /* Tell napi that we are done polling */ napi_complete_done(napi, work_done); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 79ad554f2d53..df49b0b1d54a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -4,6 +4,8 @@ #ifndef _IAVF_TXRX_H_ #define _IAVF_TXRX_H_ +#include <linux/net/intel/libie/pctype.h> + /* Interrupt Throttling and Rate Limiting Goodies */ #define IAVF_DEFAULT_IRQ_WORK 256 @@ -59,26 +61,26 @@ enum iavf_dyn_idx_t { #define IAVF_PE_ITR IAVF_IDX_ITR2 /* Supported RSS offloads */ -#define IAVF_DEFAULT_RSS_HENA ( \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD)) - -#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ - BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) +#define IAVF_DEFAULT_RSS_HASHCFG ( \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD)) + +#define IAVF_DEFAULT_RSS_HASHCFG_EXPANDED (IAVF_DEFAULT_RSS_HASHCFG | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IAVF_RX_INCREMENT(r, i) \ diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index f9e1319620f4..cb12e86ba4a6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -463,38 +463,6 @@ enum iavf_tx_ctx_desc_cmd_bits { IAVF_TX_CTX_DESC_SWPE = 0x40 }; -/* Packet Classifier Types for filters */ -enum iavf_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. 
- */ - IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - IAVF_FILTER_PCTYPE_FCOE_OX = 48, - IAVF_FILTER_PCTYPE_FCOE_RX = 49, - IAVF_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - #define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30 #define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 07f0d0a0f1e2..31a4289fc0ee 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1145,12 +1145,12 @@ void iavf_request_stats(struct iavf_adapter *adapter) } /** - * iavf_get_hena + * iavf_get_rss_hashcfg * @adapter: adapter structure * - * Request hash enable capabilities from PF + * Request RSS Hash enable bits from PF **/ -void iavf_get_hena(struct iavf_adapter *adapter) +void iavf_get_rss_hashcfg(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1158,20 +1158,20 @@ void iavf_get_hena(struct iavf_adapter *adapter) adapter->current_op); return; } - adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; - adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; - iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); + adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS; + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0); } /** - * iavf_set_hena + * iavf_set_rss_hashcfg * @adapter: adapter structure * * Request the PF to set our RSS hash capabilities **/ -void iavf_set_hena(struct iavf_adapter *adapter) +void iavf_set_rss_hashcfg(struct iavf_adapter *adapter) { - struct virtchnl_rss_hena vrh; + struct virtchnl_rss_hashcfg vrh; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -1179,10 +1179,10 @@ void iavf_set_hena(struct iavf_adapter *adapter) adapter->current_op); return; } - vrh.hena = adapter->hena; - adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; - adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; - iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, + vrh.hashcfg = adapter->rss_hashcfg; + adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG; + adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh, sizeof(vrh)); } @@ -2752,11 +2752,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; - case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { - struct virtchnl_rss_hena 
*vrh = (struct virtchnl_rss_hena *)msg; + case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: { + struct virtchnl_rss_hashcfg *vrh = + (struct virtchnl_rss_hashcfg *)msg; if (msglen == sizeof(*vrh)) - adapter->hena = vrh->hena; + adapter->rss_hashcfg = vrh->hashcfg; else dev_warn(&adapter->pdev->dev, "Invalid message %d from PF\n", v_opcode); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index ddd0ad68185b..dcf87efb9f20 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -614,6 +614,7 @@ struct ice_pf { u16 globr_count; /* Global reset count */ u16 empr_count; /* EMP reset count */ u16 pfr_count; /* PF reset count */ + u32 link_down_events; u8 wol_ena : 1; /* software state of WoL */ u32 wakeup_reason; /* last wakeup reason */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index bdee499f991a..0ae7387e0599 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -2272,6 +2272,22 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[]; }; +#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6 + +struct ice_cgu_input_measure { + u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES]; + __le32 freq; +} __packed __aligned(sizeof(__le16)); + +#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0) + +/* Get CGU input measure command response data structure (indirect 0x0C59) */ +struct ice_aqc_get_cgu_input_measure { + u8 dpll_idx_opt; + u8 length; + u8 rsvd[6]; +}; + #define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0) /* Get CGU abilities command response data structure (indirect 0x0C61) */ @@ -2721,6 +2737,7 @@ struct ice_aq_desc { struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_download_pkg download_pkg; + struct ice_aqc_get_cgu_input_measure get_cgu_input_measure; struct ice_aqc_set_cgu_input_config set_cgu_input_config; struct ice_aqc_get_cgu_input_config get_cgu_input_config; struct ice_aqc_set_cgu_output_config set_cgu_output_config; @@ -2772,6 +2789,8 @@ enum ice_aq_err { ICE_AQ_RC_OK = 0, /* Success */ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */ ICE_AQ_RC_ENOENT = 2, /* No such element */ + ICE_AQ_RC_ESRCH = 3, /* Bad opcode */ + ICE_AQ_RC_EAGAIN = 8, /* Try again */ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ @@ -2927,6 +2946,7 @@ enum ice_adminq_opc { ice_aqc_opc_get_pkg_info_list = 0x0C43, /* 1588/SyncE commands/events */ + ice_aqc_opc_get_cgu_input_measure = 0x0C59, ice_aqc_opc_get_cgu_abilities = 0x0C61, ice_aqc_opc_set_cgu_input_config = 0x0C62, ice_aqc_opc_get_cgu_input_config = 0x0C63, diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 2bc5c7f59844..1f7834c03550 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto) } /** + * ice_arfs_cmp - Check if aRFS filter matches this flow. + * @fltr_info: filter info of the saved ARFS entry. + * @fk: flow dissector keys. + * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6). + * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP. 
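+ *
+ * Illustrative sketch (not from this patch; assumes the generic flow
+ * dissector helper): the caller dissects the skb before comparing,
+ * along these lines:
+ *
+ *	struct flow_keys fk;
+ *
+ *	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ *		return -EPROTONOSUPPORT;
+ *	n_proto = fk.basic.n_proto;
+ *	ip_proto = fk.basic.ip_proto;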
+ * + * Since this function assumes limited values for n_proto and ip_proto, it + * is meant to be called only from ice_rx_flow_steer(). + * + * Return: + * * true - fltr_info refers to the same flow as fk. + * * false - fltr_info and fk refer to different flows. + */ +static bool +ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk, + __be16 n_proto, u8 ip_proto) +{ + /* Determine if the filter is for IPv4 or IPv6 based on flow_type, + * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}. + */ + bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP; + + /* Following checks are arranged in the quickest and most discriminative + * fields first for early failure. + */ + if (is_v4) + return n_proto == htons(ETH_P_IP) && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst && + fltr_info->ip.v4.proto == ip_proto; + + return fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + fltr_info->ip.v6.proto == ip_proto && + !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); +} + +/** * ice_rx_flow_steer - steer the Rx flow to where application is being run * @netdev: ptr to the netdev being adjusted * @skb: buffer with required header information @@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, continue; fltr_info = &arfs_entry->fltr_info; + + if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto)) + continue; + ret = fltr_info->fltr_id; if (fltr_info->q_index == rxq_idx || diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 6db4ad8fc70b..270f936ce807 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -623,7 +623,10 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) return 0; } - ice_alloc_rx_bufs(ring, num_bufs); + if (ring->vsi->type == ICE_VSI_CTRL) + ice_init_ctrl_rx_descs(ring, num_bufs); + else + ice_alloc_rx_bufs(ring, num_bufs); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 4fedf0181c4e..48ff515d7c61 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4971,6 +4971,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, } /** + * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements + * @hw: pointer to the HW struct + * @dpll_idx: index of dpll to be measured + * @meas: array to be filled with results + * @meas_num: max number of results array can hold + * + * Get CGU measurements (0x0C59) of phase and frequency offsets for input + * pins on given dpll. + * + * Return: 0 on success or negative value on failure. 
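+ *
+ * Illustrative sketch (not from this patch): a minimal caller,
+ * mirroring the dpll code later in this series:
+ *
+ *	struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ *	int err;
+ *
+ *	err = ice_aq_get_cgu_input_pin_measure(hw, DPLL_TYPE_PPS, meas,
+ *					       ARRAY_SIZE(meas));
+ *	if (err)
+ *		return err;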
+ */ +int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx, + struct ice_cgu_input_measure *meas, + u16 meas_num) +{ + struct ice_aqc_get_cgu_input_measure *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure); + cmd = &desc.params.get_cgu_input_measure; + cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M; + + return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL); +} + +/** * ice_aq_get_cgu_abilities - get cgu abilities * @hw: pointer to the HW struct * @abilities: CGU abilities diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 64c530b39191..c70f56d897dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -229,6 +229,9 @@ void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag); +int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx, + struct ice_cgu_input_measure *meas, + u16 meas_num); int ice_aq_get_cgu_abilities(struct ice_hw *hw, struct ice_aqc_get_cgu_abilities *abilities); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index bce3ad6ca2a6..d6190d9e32ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -11,6 +11,30 @@ #define ICE_DPLL_RCLK_NUM_PER_PF 1 #define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25 #define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125 +#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff +#define ICE_DPLL_INPUT_REF_NUM 10 +#define ICE_DPLL_PHASE_OFFSET_PERIOD 2 +#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4 +#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6 +#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0 + +#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \ + (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx)) + +#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \ + (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX)) + +#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \ + (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX)) + +#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \ + (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx)) + +#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \ + (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX)) + +#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \ + (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX)) /** * enum ice_dpll_pin_type - enumerate ice pin types: @@ -18,25 +42,61 @@ * @ICE_DPLL_PIN_TYPE_INPUT: input pin * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin + * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins */ enum ice_dpll_pin_type { ICE_DPLL_PIN_INVALID, ICE_DPLL_PIN_TYPE_INPUT, ICE_DPLL_PIN_TYPE_OUTPUT, ICE_DPLL_PIN_TYPE_RCLK_INPUT, + ICE_DPLL_PIN_TYPE_SOFTWARE, }; static const char * const pin_type_name[] = { [ICE_DPLL_PIN_TYPE_INPUT] = "input", [ICE_DPLL_PIN_TYPE_OUTPUT] = "output", [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input", + [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software", }; +static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" }; +static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" }; + static const struct dpll_pin_frequency ice_esync_range[] = { DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ), }; /** + * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW + * @pf: private board structure + * @index: index of a pin as 
understood by FW + * @input: true for input, false for output + * + * Check if the pin shall be controlled by SW instead of providing raw access + * for pin control. On E810 NICs with a dpll there is additional MUX-related + * logic between the SMA/U.FL pins/connectors and the dpll device, so it is + * best to give the user access through a series of wrapper functions: from + * the user's perspective they convey a single functionality rather than + * separate pins. + * + * Return: + * * true - pin controlled by SW + * * false - pin not controlled by SW + */ +static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input) +{ + if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP) + index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP - + ICE_DPLL_SW_PIN_INPUT_BASE_SFP; + + if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX || + index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) || + (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX || + index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX))) + return true; + return false; +} + +/** * ice_dpll_is_reset - check if reset is in progress * @pf: private board structure * @extack: error reporting @@ -280,6 +340,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, } /** + * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * + * Calls the set frequency command for the corresponding active input/output + * pin. + * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error, pin not active or HW access failed + */ +static int +ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 frequency, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *sma = pin_priv; + int ret; + + if (!sma->active) { + NL_SET_ERR_MSG(extack, "pin is not active"); + return -EINVAL; + } + if (sma->direction == DPLL_PIN_DIRECTION_INPUT) + ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll, + dpll_priv, frequency, + extack); + else + ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll, + dpll_priv, frequency, + extack); + + return ret; +} + +/** + * ice_dpll_sw_pin_frequency_get - callback to get frequency of SW pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * + * Calls the get frequency command for the corresponding active input/output.
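+ *
+ * Note: unlike the setter above, an inactive SW pin is not an error
+ * here - the getter reports 0 Hz and succeeds, so userspace always
+ * sees a well-formed pin object.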
+ * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error, pin not active or couldn't get from hw + */ +static int +ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *sma = pin_priv; + int ret; + + if (!sma->active) { + *frequency = 0; + return 0; + } + if (sma->direction == DPLL_PIN_DIRECTION_INPUT) { + ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll, + dpll_priv, frequency, + extack); + } else { + ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll, + dpll_priv, frequency, + extack); + } + + return ret; +} + +/** * ice_dpll_pin_enable - enable a pin on dplls * @hw: board private hw structure * @pin: pointer to a pin @@ -375,6 +516,67 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin, } /** + * ice_dpll_sw_pins_update - update status of all SW pins + * @pf: private board struct + * + * Determine the current values of the pin struct fields (direction/active) + * and update them for all the SW controlled pins. + * + * Context: Call with pf->dplls.lock held + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_sw_pins_update(struct ice_pf *pf) +{ + struct ice_dplls *d = &pf->dplls; + struct ice_dpll_pin *p; + u8 data = 0; + int ret; + + ret = ice_read_sma_ctrl(&pf->hw, &data); + if (ret) + return ret; + /* no change since last check */ + if (d->sma_data == data) + return 0; + + /* + * SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide + * on their direction and whether they are active + */ + p = &d->sma[ICE_DPLL_PIN_SW_1_IDX]; + p->active = true; + p->direction = DPLL_PIN_DIRECTION_INPUT; + if (data & ICE_SMA1_DIR_EN) { + p->direction = DPLL_PIN_DIRECTION_OUTPUT; + if (data & ICE_SMA1_TX_EN) + p->active = false; + } + + p = &d->sma[ICE_DPLL_PIN_SW_2_IDX]; + p->active = true; + p->direction = DPLL_PIN_DIRECTION_INPUT; + if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK) + p->active = false; + else if (data & ICE_SMA2_DIR_EN) + p->direction = DPLL_PIN_DIRECTION_OUTPUT; + + p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX]; + if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN))) + p->active = true; + else + p->active = false; + + p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX]; + p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS); + d->sma_data = data; + + return 0; +} + +/** * ice_dpll_pin_state_update - update pin's state * @pf: private board struct * @pin: structure with pin attributes to be updated @@ -471,6 +673,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, DPLL_PIN_STATE_DISCONNECTED; } break; + case ICE_DPLL_PIN_TYPE_SOFTWARE: + ret = ice_dpll_sw_pins_update(pf); + if (ret) + goto err; + break; default: return -EINVAL; } @@ -588,6 +795,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv, } /** + * ice_dpll_phase_offset_monitor_set - set phase offset monitor state + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: feature state to be set + * @extack: error reporting + * + * Dpll subsystem callback. Enable/disable the phase offset monitor feature of the dpll.
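+ *
+ * Note: the period stored here is consumed by the periodic worker,
+ * roughly (see ice_dpll_periodic_work() below):
+ *
+ *	if (dp->phase_offset_monitor_period &&
+ *	    d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ *		ret = ice_dpll_pps_update_phase_offsets(pf, &ntf);
+ *
+ * With ICE_DPLL_PHASE_OFFSET_PERIOD == 2 and the worker running twice
+ * a second, offsets are remeasured about once per second.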
+ * + * Context: Acquires and releases pf->dplls.lock + * Return: 0 - success + */ +static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_feature_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + if (state == DPLL_FEATURE_STATE_ENABLE) + d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD; + else + d->phase_offset_monitor_period = 0; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_phase_offset_monitor_get - get phase offset monitor state + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds current state of phase offset monitor + * @extack: error reporting + * + * Dpll subsystem callback. Provides current state of phase offset monitor + * features on dpll device. + * + * Context: Acquires and releases pf->dplls.lock + * Return: 0 - success + */ +static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_feature_state *state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + if (d->phase_offset_monitor_period) + *state = DPLL_FEATURE_STATE_ENABLE; + else + *state = DPLL_FEATURE_STATE_DISABLE; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** * ice_dpll_pin_state_set - set pin's state on dpll * @pin: pointer to a pin * @pin_priv: private data pointer passed on pin registration @@ -793,6 +1061,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv, } /** + * ice_dpll_sma_direction_set - set direction of SMA pin + * @p: pointer to a pin + * @direction: requested direction of the pin + * @extack: error reporting + * + * Wrapper for dpll subsystem callback. Set direction of a SMA pin. + * + * Context: Call with pf->dplls.lock held + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p, + enum dpll_pin_direction direction, + struct netlink_ext_ack *extack) +{ + u8 data; + int ret; + + if (p->direction == direction && p->active) + return 0; + ret = ice_read_sma_ctrl(&p->pf->hw, &data); + if (ret) + return ret; + + switch (p->idx) { + case ICE_DPLL_PIN_SW_1_IDX: + data &= ~ICE_SMA1_MASK; + if (direction == DPLL_PIN_DIRECTION_OUTPUT) + data |= ICE_SMA1_DIR_EN; + break; + case ICE_DPLL_PIN_SW_2_IDX: + if (direction == DPLL_PIN_DIRECTION_INPUT) { + data &= ~ICE_SMA2_DIR_EN; + } else { + data &= ~ICE_SMA2_TX_EN; + data |= ICE_SMA2_DIR_EN; + } + break; + default: + return -EINVAL; + } + ret = ice_write_sma_ctrl(&p->pf->hw, data); + if (!ret) + ret = ice_dpll_pin_state_update(p->pf, p, + ICE_DPLL_PIN_TYPE_SOFTWARE, + extack); + + return ret; +} + +/** + * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: requested state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Set the state of a pin. 
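+ *
+ * Note: the accepted states differ per pin, as the switch below
+ * implements - U.FL1 wraps an output and toggles between CONNECTED and
+ * DISCONNECTED, while U.FL2 wraps an input and toggles between
+ * SELECTABLE and DISCONNECTED; any other state yields -EINVAL.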
+ * + * Context: Acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv, *target; + struct ice_dpll *d = dpll_priv; + enum ice_dpll_pin_type type; + struct ice_pf *pf = p->pf; + struct ice_hw *hw; + bool enable; + u8 data; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + + mutex_lock(&pf->dplls.lock); + hw = &pf->hw; + ret = ice_read_sma_ctrl(hw, &data); + if (ret) + goto unlock; + + ret = -EINVAL; + switch (p->idx) { + case ICE_DPLL_PIN_SW_1_IDX: + if (state == DPLL_PIN_STATE_CONNECTED) { + data &= ~ICE_SMA1_MASK; + enable = true; + } else if (state == DPLL_PIN_STATE_DISCONNECTED) { + data |= ICE_SMA1_TX_EN; + enable = false; + } else { + goto unlock; + } + target = p->output; + type = ICE_DPLL_PIN_TYPE_OUTPUT; + break; + case ICE_DPLL_PIN_SW_2_IDX: + if (state == DPLL_PIN_STATE_SELECTABLE) { + data |= ICE_SMA2_DIR_EN; + data &= ~ICE_SMA2_UFL2_RX_DIS; + enable = true; + } else if (state == DPLL_PIN_STATE_DISCONNECTED) { + data |= ICE_SMA2_UFL2_RX_DIS; + enable = false; + } else { + goto unlock; + } + target = p->input; + type = ICE_DPLL_PIN_TYPE_INPUT; + break; + default: + goto unlock; + } + + ret = ice_write_sma_ctrl(hw, data); + if (ret) + goto unlock; + ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE, + extack); + if (ret) + goto unlock; + + if (enable) + ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack); + else + ret = ice_dpll_pin_disable(hw, target, type, extack); + if (!ret) + ret = ice_dpll_pin_state_update(pf, target, type, extack); + +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_sw_pin_state_get - get SW pin state + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Check state of a SW pin. 
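+ *
+ * Note: an inactive SW pin short-circuits to DISCONNECTED without
+ * touching the hardware; otherwise the wrapped input/output pin state
+ * is refreshed first and reported per dpll via state[d->dpll_idx].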
+ * + * Context: Acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = p->pf; + int ret = 0; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (!p->active) { + *state = DPLL_PIN_STATE_DISCONNECTED; + goto unlock; + } + + if (p->direction == DPLL_PIN_DIRECTION_INPUT) { + ret = ice_dpll_pin_state_update(pf, p->input, + ICE_DPLL_PIN_TYPE_INPUT, + extack); + if (ret) + goto unlock; + *state = p->input->state[d->dpll_idx]; + } else { + ret = ice_dpll_pin_state_update(pf, p->output, + ICE_DPLL_PIN_TYPE_OUTPUT, + extack); + if (ret) + goto unlock; + *state = p->output->state[d->dpll_idx]; + } +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: requested state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Set state of a pin. + * + * Context: Acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *sma = pin_priv, *target; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = sma->pf; + enum ice_dpll_pin_type type; + bool enable; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + + mutex_lock(&pf->dplls.lock); + if (!sma->active) { + ret = ice_dpll_sma_direction_set(sma, sma->direction, extack); + if (ret) + goto unlock; + } + if (sma->direction == DPLL_PIN_DIRECTION_INPUT) { + enable = state == DPLL_PIN_STATE_SELECTABLE; + target = sma->input; + type = ICE_DPLL_PIN_TYPE_INPUT; + } else { + enable = state == DPLL_PIN_STATE_CONNECTED; + target = sma->output; + type = ICE_DPLL_PIN_TYPE_OUTPUT; + } + + if (enable) + ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type, + extack); + else + ret = ice_dpll_pin_disable(&pf->hw, target, type, extack); + if (!ret) + ret = ice_dpll_pin_state_update(pf, target, type, extack); + +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** * ice_dpll_input_prio_get - get dpll's input prio * @pin: pointer to a pin * @pin_priv: private data pointer passed on pin registration @@ -860,6 +1392,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, return ret; } +static int +ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 *prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT) + *prio = d->input_prio[p->input->idx]; + else + *prio = ICE_DPLL_PIN_PRIO_OUTPUT; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +static int +ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv, + const struct 
dpll_device *dpll, void *dpll_priv, + u32 prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT) + return -EINVAL; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + /** * ice_dpll_input_direction - callback for get input pin direction * @pin: pointer to a pin @@ -911,6 +1484,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv, } /** + * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: requested pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting direction of a SMA pin. + * + * Context: Acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction direction, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_pf *pf = p->pf; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_sma_direction_set(p, direction, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_pin_sw_direction_get - callback for get SW pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: on success holds pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting direction of a SMA pin. + * + * Context: Acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_pf *pf = p->pf; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + *direction = p->direction; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** * ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value * @pin: pointer to a pin * @pin_priv: private data pointer passed on pin registration @@ -1024,7 +1667,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, * Dpll subsystem callback. Wraps a handler for setting phase adjust on input * pin. 
* - * Context: Calls a function which acquires pf->dplls.lock + * Context: Calls a function which acquires and releases pf->dplls.lock * Return: * * 0 - success * * negative - error @@ -1068,6 +1711,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, ICE_DPLL_PIN_TYPE_OUTPUT); } +/** + * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: on success holds phase adjust value + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for getting phase adjust on sw + * pin. + * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 *phase_adjust, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + + if (p->direction == DPLL_PIN_DIRECTION_INPUT) + return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input, + dpll, dpll_priv, + phase_adjust, extack); + else + return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output, + dpll, dpll_priv, + phase_adjust, extack); +} + +/** + * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for setting phase adjust on output + * pin. 
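+ * (or on an input pin - the body dispatches on the SW pin's current
+ * direction, forwarding to ice_dpll_pin_phase_adjust_set() with
+ * ICE_DPLL_PIN_TYPE_INPUT or ICE_DPLL_PIN_TYPE_OUTPUT accordingly).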
+ * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + + if (!p->active) { + NL_SET_ERR_MSG(extack, "pin is not active"); + return -EINVAL; + } + if (p->direction == DPLL_PIN_DIRECTION_INPUT) + return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input, + dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_INPUT); + else + return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output, + dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_OUTPUT); +} + #define ICE_DPLL_PHASE_OFFSET_DIVIDER 100 #define ICE_DPLL_PHASE_OFFSET_FACTOR \ (DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER) @@ -1093,12 +1812,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, const struct dpll_device *dpll, void *dpll_priv, s64 *phase_offset, struct netlink_ext_ack *extack) { + struct ice_dpll_pin *p = pin_priv; struct ice_dpll *d = dpll_priv; struct ice_pf *pf = d->pf; mutex_lock(&pf->dplls.lock); - if (d->active_input == pin) + if (d->active_input == pin || (p->input && + d->active_input == p->input->pin)) *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; + else if (d->phase_offset_monitor_period) + *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; else *phase_offset = 0; mutex_unlock(&pf->dplls.lock); @@ -1315,6 +2038,76 @@ ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv, } /** + * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @freq: requested embedded sync frequency + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting embedded sync frequency value + * on SW pin. + * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + + if (!p->active) { + NL_SET_ERR_MSG(extack, "pin is not active"); + return -EINVAL; + } + if (p->direction == DPLL_PIN_DIRECTION_INPUT) + return ice_dpll_input_esync_set(p->input->pin, p->input, dpll, + dpll_priv, freq, extack); + else + return ice_dpll_output_esync_set(p->output->pin, p->output, + dpll, dpll_priv, freq, extack); +} + +/** + * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @esync: on success holds embedded sync frequency and properties + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting embedded sync frequency value + * of SW pin. 
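+ *
+ * Note: presumably the wrapped handlers fill @esync from the esync
+ * range (ice_esync_range) and the 25% pulse-high duty cycle
+ * (ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT) defined at the top of this
+ * file.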
+ * + * Context: Calls a function which acquires and releases pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + + if (p->direction == DPLL_PIN_DIRECTION_INPUT) + return ice_dpll_input_esync_get(p->input->pin, p->input, dpll, + dpll_priv, esync, extack); + else + return ice_dpll_output_esync_get(p->output->pin, p->output, + dpll, dpll_priv, esync, + extack); +} + +/** * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin * @pin: pointer to a pin * @pin_priv: private data pointer passed on pin registration @@ -1427,6 +2220,35 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = { .direction_get = ice_dpll_input_direction, }; +static const struct dpll_pin_ops ice_dpll_pin_sma_ops = { + .state_on_dpll_set = ice_dpll_sma_pin_state_set, + .state_on_dpll_get = ice_dpll_sw_pin_state_get, + .direction_get = ice_dpll_pin_sw_direction_get, + .direction_set = ice_dpll_pin_sma_direction_set, + .prio_get = ice_dpll_sw_input_prio_get, + .prio_set = ice_dpll_sw_input_prio_set, + .frequency_get = ice_dpll_sw_pin_frequency_get, + .frequency_set = ice_dpll_sw_pin_frequency_set, + .phase_adjust_get = ice_dpll_sw_phase_adjust_get, + .phase_adjust_set = ice_dpll_sw_phase_adjust_set, + .phase_offset_get = ice_dpll_phase_offset_get, + .esync_set = ice_dpll_sw_esync_set, + .esync_get = ice_dpll_sw_esync_get, +}; + +static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = { + .state_on_dpll_set = ice_dpll_ufl_pin_state_set, + .state_on_dpll_get = ice_dpll_sw_pin_state_get, + .direction_get = ice_dpll_pin_sw_direction_get, + .frequency_get = ice_dpll_sw_pin_frequency_get, + .frequency_set = ice_dpll_sw_pin_frequency_set, + .esync_set = ice_dpll_sw_esync_set, + .esync_get = ice_dpll_sw_esync_get, + .phase_adjust_get = ice_dpll_sw_phase_adjust_get, + .phase_adjust_set = ice_dpll_sw_phase_adjust_set, + .phase_offset_get = ice_dpll_phase_offset_get, +}; + static const struct dpll_pin_ops ice_dpll_input_ops = { .frequency_get = ice_dpll_input_frequency_get, .frequency_set = ice_dpll_input_frequency_set, @@ -1459,6 +2281,13 @@ static const struct dpll_device_ops ice_dpll_ops = { .mode_get = ice_dpll_mode_get, }; +static const struct dpll_device_ops ice_dpll_pom_ops = { + .lock_status_get = ice_dpll_lock_status_get, + .mode_get = ice_dpll_mode_get, + .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set, + .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get, +}; + /** * ice_generate_clock_id - generates unique clock_id for registering dpll. * @pf: board private structure @@ -1504,6 +2333,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d) } /** + * ice_dpll_is_pps_phase_monitor - check if dpll capable of phase offset monitor + * @pf: pf private structure + * + * Check if firmware is capable of supporting admin command to provide + * phase offset monitoring on all the input pins on PPS dpll. 
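+ *
+ * Note: support is probed by issuing the 0x0C59 command once; only an
+ * ICE_AQ_RC_ESRCH ("Bad opcode", newly added to ice_aq_err) result is
+ * treated as "not supported" - any other outcome leaves the feature
+ * advertised through the ice_dpll_pom_ops device ops.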
+ * + * Returns: + * * true - PPS dpll phase offset monitoring is supported + * * false - PPS dpll phase offset monitoring is not supported + */ +static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf) +{ + struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM]; + int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas, + ARRAY_SIZE(meas)); + + if (ret && pf->hw.adminq.sq_last_status == ICE_AQ_RC_ESRCH) + return false; + + return true; +} + +/** + * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes + * @pins: array of ice_dpll_pin pointers registered within dpll subsystem + * @pin_num: number of pins + * @phase_offset_ntf_mask: bitmask of pin indexes to notify + * + * Iterate over array of pins and call dpll subsystem pin notify if + * corresponding pin index within bitmask is set. + * + * Context: Must be called while pf->dplls.lock is released. + */ +static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins, + u8 pin_num, + u32 phase_offset_ntf_mask) +{ + int i = 0; + + for (i = 0; i < pin_num; i++) + if (phase_offset_ntf_mask & (1 << i)) + dpll_pin_change_ntf(pins[i].pin); +} + +/** + * ice_dpll_pps_update_phase_offsets - update phase offset measurements + * @pf: pf private structure + * @phase_offset_pins_updated: returns mask of updated input pin indexes + * + * Read phase offset measurements for PPS dpll device and store values in + * input pins array. On success phase_offset_pins_updated - fills bitmask of + * updated input pin indexes, pins shall be notified. + * + * Context: Shall be called with pf->dplls.lock being locked. + * Returns: + * * 0 - success or no data available + * * negative - AQ failure + */ +static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf, + u32 *phase_offset_pins_updated) +{ + struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM]; + struct ice_dpll_pin *p; + s64 phase_offset, tmp; + int i, j, ret; + + *phase_offset_pins_updated = 0; + ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas, + ARRAY_SIZE(meas)); + if (ret && pf->hw.adminq.sq_last_status == ICE_AQ_RC_EAGAIN) { + return 0; + } else if (ret) { + dev_err(ice_pf_to_dev(pf), + "failed to get input pin measurements dpll=%d, ret=%d %s\n", + DPLL_TYPE_PPS, ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); + return ret; + } + for (i = 0; i < pf->dplls.num_inputs; i++) { + p = &pf->dplls.inputs[i]; + phase_offset = 0; + for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) { + tmp = meas[i].phase_offset[j]; +#ifdef __LITTLE_ENDIAN + phase_offset += tmp << 8 * j; +#else + phase_offset += tmp << 8 * + (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j); +#endif + } + phase_offset = sign_extend64(phase_offset, 47); + if (p->phase_offset != phase_offset) { + dev_dbg(ice_pf_to_dev(pf), + "phase offset changed for pin:%d old:%llx, new:%llx\n", + p->idx, p->phase_offset, phase_offset); + p->phase_offset = phase_offset; + *phase_offset_pins_updated |= (1 << i); + } + } + + return 0; +} + +/** * ice_dpll_update_state - update dpll state * @pf: pf private structure * @d: pointer to queried dpll device @@ -1589,14 +2522,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work) struct ice_pf *pf = container_of(d, struct ice_pf, dplls); struct ice_dpll *de = &pf->dplls.eec; struct ice_dpll *dp = &pf->dplls.pps; + u32 phase_offset_ntf = 0; int ret = 0; if (ice_is_reset_in_progress(pf->state)) goto resched; mutex_lock(&pf->dplls.lock); + d->periodic_counter++; ret = ice_dpll_update_state(pf, de, false); if (!ret) ret = 
ice_dpll_update_state(pf, dp, false); + if (!ret && dp->phase_offset_monitor_period && + d->periodic_counter % dp->phase_offset_monitor_period == 0) + ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf); if (ret) { d->cgu_state_acq_err_num++; /* stop rescheduling this worker */ @@ -1611,6 +2549,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work) mutex_unlock(&pf->dplls.lock); ice_dpll_notify_changes(de); ice_dpll_notify_changes(dp); + if (phase_offset_ntf) + ice_dpll_pins_notify_mask(d->inputs, d->num_inputs, + phase_offset_ntf); resched: /* Run twice a second or reschedule if update failed */ @@ -1689,7 +2630,8 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, int i; for (i = 0; i < count; i++) - dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); + if (!pins[i].hidden) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); } /** @@ -1712,16 +2654,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, int ret, i; for (i = 0; i < count; i++) { - ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]); - if (ret) - goto unregister_pins; + if (!pins[i].hidden) { + ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]); + if (ret) + goto unregister_pins; + } } return 0; unregister_pins: while (--i >= 0) - dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); + if (!pins[i].hidden) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); return ret; } @@ -1909,6 +2854,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu) ice_dpll_unregister_pins(de->dpll, outputs, &ice_dpll_output_ops, num_outputs); ice_dpll_release_pins(outputs, num_outputs); + if (!pf->dplls.generic) { + ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_ufl_ops, + pf->dplls.pps.dpll, + pf->dplls.eec.dpll); + ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_sma_ops, + pf->dplls.pps.dpll, + pf->dplls.eec.dpll); + } } } @@ -1926,8 +2883,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu) */ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu) { - u32 rclk_idx; - int ret; + int ret, count; ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0, pf->dplls.num_inputs, @@ -1935,23 +2891,56 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu) pf->dplls.eec.dpll, pf->dplls.pps.dpll); if (ret) return ret; + count = pf->dplls.num_inputs; if (cgu) { ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs, - pf->dplls.num_inputs, + count, pf->dplls.num_outputs, &ice_dpll_output_ops, pf->dplls.eec.dpll, pf->dplls.pps.dpll); if (ret) goto deinit_inputs; + count += pf->dplls.num_outputs; + if (!pf->dplls.generic) { + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma, + count, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_sma_ops, + pf->dplls.eec.dpll, + pf->dplls.pps.dpll); + if (ret) + goto deinit_outputs; + count += ICE_DPLL_PIN_SW_NUM; + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl, + count, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_ufl_ops, + pf->dplls.eec.dpll, + pf->dplls.pps.dpll); + if (ret) + goto deinit_sma; + count += ICE_DPLL_PIN_SW_NUM; + } + } else { + count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM; } - rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id; - ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx, + ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id, &ice_dpll_rclk_ops); if (ret) - goto deinit_outputs; + goto deinit_ufl; return 0; 
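+	/*
+	 * Note: the unwind labels below mirror registration order - U.FL
+	 * pins were registered last, so they are torn down first, then
+	 * SMA pins, then outputs, then inputs.
+	 */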
+deinit_ufl: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_ufl_ops, + pf->dplls.pps.dpll, pf->dplls.eec.dpll); +deinit_sma: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma, + ICE_DPLL_PIN_SW_NUM, + &ice_dpll_pin_sma_ops, + pf->dplls.pps.dpll, pf->dplls.eec.dpll); deinit_outputs: ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs, pf->dplls.num_outputs, @@ -1977,7 +2966,7 @@ static void ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu) { if (cgu) - dpll_device_unregister(d->dpll, &ice_dpll_ops, d); + dpll_device_unregister(d->dpll, d->ops, d); dpll_device_put(d->dpll); } @@ -2011,12 +3000,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu, } d->pf = pf; if (cgu) { + const struct dpll_device_ops *ops = &ice_dpll_ops; + + if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf)) + ops = &ice_dpll_pom_ops; ice_dpll_update_state(pf, d, true); - ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d); + ret = dpll_device_register(d->dpll, type, ops, d); if (ret) { dpll_device_put(d->dpll); return ret; } + d->ops = ops; } return 0; @@ -2184,8 +3178,10 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, default: return -EINVAL; } - if (num_pins != ice_cgu_get_num_pins(hw, input)) + if (num_pins != ice_cgu_get_num_pins(hw, input)) { + pf->dplls.generic = true; return ice_dpll_init_info_pins_generic(pf, input); + } for (i = 0; i < num_pins; i++) { caps = 0; @@ -2203,10 +3199,14 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, return ret; caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); + if (ice_dpll_is_sw_pin(pf, i, true)) + pins[i].hidden = true; } else { ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); if (ret) return ret; + if (ice_dpll_is_sw_pin(pf, i, false)) + pins[i].hidden = true; } ice_dpll_phase_range_set(&pins[i].prop.phase_range, phase_adj_max); @@ -2246,6 +3246,89 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf) } /** + * ice_dpll_init_info_sw_pins - initializes software controlled pin information + * @pf: board private structure + * + * Init information for software controlled pins, cache them in + * pf->dplls.sma and pf->dplls.ufl. 
+ * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info_sw_pins(struct ice_pf *pf) +{ + u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0; + struct ice_dplls *d = &pf->dplls; + struct ice_dpll_pin *pin; + u32 phase_adj_max, caps; + int i, ret; + + if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP) + input_idx_offset = ICE_E810_RCLK_PINS_NUM; + phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max); + caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) { + pin = &d->sma[i]; + pin->idx = i; + pin->prop.type = DPLL_PIN_TYPE_EXT; + pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset; + pin->prop.freq_supported = + ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx, + true, &freq_supp_num); + pin->prop.freq_supported_num = freq_supp_num; + pin->prop.capabilities = + (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE | + DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | + caps); + pin->pf = pf; + pin->prop.board_label = ice_dpll_sw_pin_sma[i]; + pin->input = &d->inputs[pin_abs_idx]; + pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)]; + ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max); + } + for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) { + pin = &d->ufl[i]; + pin->idx = i; + pin->prop.type = DPLL_PIN_TYPE_EXT; + pin->prop.capabilities = caps; + pin->pf = pf; + pin->prop.board_label = ice_dpll_sw_pin_ufl[i]; + if (i == ICE_DPLL_PIN_SW_1_IDX) { + pin->direction = DPLL_PIN_DIRECTION_OUTPUT; + pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i); + pin->prop.freq_supported = + ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx, + false, + &freq_supp_num); + pin->prop.freq_supported_num = freq_supp_num; + pin->input = NULL; + pin->output = &d->outputs[pin_abs_idx]; + } else if (i == ICE_DPLL_PIN_SW_2_IDX) { + pin->direction = DPLL_PIN_DIRECTION_INPUT; + pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + + input_idx_offset; + pin->output = NULL; + pin->input = &d->inputs[pin_abs_idx]; + pin->prop.freq_supported = + ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx, + true, &freq_supp_num); + pin->prop.freq_supported_num = freq_supp_num; + pin->prop.capabilities = + (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | + caps); + } + ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max); + } + ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE, + NULL); + if (ret) + return ret; + + return 0; +} + +/** * ice_dpll_init_pins_info - init pins info wrapper * @pf: board private structure * @pin_type: type of pins being initialized @@ -2265,6 +3348,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type) return ice_dpll_init_info_direct_pins(pf, pin_type); case ICE_DPLL_PIN_TYPE_RCLK_INPUT: return ice_dpll_init_info_rclk_pin(pf); + case ICE_DPLL_PIN_TYPE_SOFTWARE: + return ice_dpll_init_info_sw_pins(pf); default: return -EINVAL; } @@ -2351,6 +3436,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu) ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT); if (ret) goto deinit_info; + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE); + if (ret) + goto deinit_info; } ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx, diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h index c320f1bf7d6d..a5a5b61c5115 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.h +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -8,6 +8,18 @@ #define ICE_DPLL_RCLK_NUM_MAX 4 +/** + * enum ice_dpll_pin_sw - enumerate ice 
software pin indices: + * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin + * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin + * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair + */ +enum ice_dpll_pin_sw { + ICE_DPLL_PIN_SW_1_IDX, + ICE_DPLL_PIN_SW_2_IDX, + ICE_DPLL_PIN_SW_NUM +}; + /** ice_dpll_pin - store info about pins * @pin: dpll pin structure * @pf: pointer to pf, which has registered the dpll_pin @@ -19,6 +31,7 @@ * @prop: pin properties * @freq: current frequency of a pin * @phase_adjust: current phase adjust value + * @phase_offset: monitored phase offset value */ struct ice_dpll_pin { struct dpll_pin *pin; @@ -31,7 +44,13 @@ struct ice_dpll_pin { struct dpll_pin_properties prop; u32 freq; s32 phase_adjust; + struct ice_dpll_pin *input; + struct ice_dpll_pin *output; + enum dpll_pin_direction direction; + s64 phase_offset; u8 status; + bool active; + bool hidden; }; /** ice_dpll - store info required for DPLL control @@ -47,8 +66,10 @@ struct ice_dpll_pin { * @input_prio: priorities of each input * @dpll_state: current dpll sync state * @prev_dpll_state: last dpll sync state + * @phase_offset_monitor_period: period for phase offset monitor read frequency * @active_input: pointer to active input pin * @prev_input: pointer to previous active input pin + * @ops: holds the registered ops */ struct ice_dpll { struct dpll_device *dpll; @@ -64,8 +85,10 @@ struct ice_dpll { enum dpll_lock_status dpll_state; enum dpll_lock_status prev_dpll_state; enum dpll_mode mode; + u32 phase_offset_monitor_period; struct dpll_pin *active_input; struct dpll_pin *prev_input; + const struct dpll_device_ops *ops; }; /** ice_dplls - store info required for CCU (clock controlling unit) @@ -84,6 +107,7 @@ struct ice_dpll { * @clock_id: clock_id of dplls * @input_phase_adj_max: max phase adjust value for an input pins * @output_phase_adj_max: max phase adjust value for an output pins + * @periodic_counter: counter of periodic work executions */ struct ice_dplls { struct kthread_worker *kworker; @@ -93,14 +117,19 @@ struct ice_dplls { struct ice_dpll pps; struct ice_dpll_pin *inputs; struct ice_dpll_pin *outputs; + struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM]; + struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM]; struct ice_dpll_pin rclk; u8 num_inputs; u8 num_outputs; - int cgu_state_acq_err_num; + u8 sma_data; u8 base_rclk_idx; + int cgu_state_acq_err_num; u64 clock_id; s32 input_phase_adj_max; s32 output_phase_adj_max; + u32 periodic_counter; + bool generic; }; #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 6aae03771746..2e4f0969035f 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -508,10 +508,14 @@ err_create_repr: */ int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_repr *repr = ice_repr_create_vf(vf); struct devlink *devlink = priv_to_devlink(pf); + struct ice_repr *repr; int err; + if (!ice_is_eswitch_mode_switchdev(pf)) + return 0; + + repr = ice_repr_create_vf(vf); if (IS_ERR(repr)) return PTR_ERR(repr); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index bbf9e6fd315b..ea7e8b879b48 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -836,6 +836,15 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data) #endif /* !CONFIG_DYNAMIC_DEBUG */ } +static void ice_get_link_ext_stats(struct 
net_device *netdev, + struct ethtool_link_ext_stats *stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + stats->link_down_events = pf->link_down_events; +} + static int ice_get_eeprom_len(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -2788,14 +2797,7 @@ done: return err; } -/** - * ice_parse_hdrs - parses headers from RSS hash input - * @nfc: ethtool rxnfc command - * - * This function parses the rxnfc command and returns intended - * header types for RSS configuration - */ -static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) +static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc) { u32 hdrs = ICE_FLOW_SEG_HDR_NONE; @@ -2860,15 +2862,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) return hdrs; } -/** - * ice_parse_hash_flds - parses hash fields from RSS hash input - * @nfc: ethtool rxnfc command - * @symm: true if Symmetric Topelitz is set - * - * This function parses the rxnfc command and returns intended - * hash fields for RSS configuration - */ -static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm) +static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm) { u64 hfld = ICE_HASH_INVALID; @@ -2965,16 +2959,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm) return hfld; } -/** - * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash - * @vsi: the VSI being configured - * @nfc: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. - */ static int -ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) +ice_set_rxfh_fields(struct net_device *netdev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_rss_hash_cfg cfg; struct device *dev; @@ -3020,14 +3011,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return 0; } -/** - * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type - * @vsi: the VSI being configured - * @nfc: ethtool rxnfc command - */ -static void -ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) +static int +ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc) { + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct device *dev; u64 hash_flds; @@ -3040,21 +3028,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) if (ice_is_safe_mode(pf)) { dev_dbg(dev, "Advanced RSS disabled. 
Package download failed, vsi num = %d\n", vsi->vsi_num); - return; + return 0; } hdrs = ice_parse_hdrs(nfc); if (hdrs == ICE_FLOW_SEG_HDR_NONE) { dev_dbg(dev, "Header type is not valid, vsi num = %d\n", vsi->vsi_num); - return; + return 0; } hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm); if (hash_flds == ICE_HASH_INVALID) { dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n", vsi->vsi_num); - return; + return 0; } if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA || @@ -3081,6 +3069,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID || hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID) nfc->data |= (u64)RXH_GTP_TEID; + + return 0; } /** @@ -3100,8 +3090,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) return ice_add_fdir_ethtool(vsi, cmd); case ETHTOOL_SRXCLSRLDEL: return ice_del_fdir_ethtool(vsi, cmd); - case ETHTOOL_SRXFH: - return ice_set_rss_hash_opt(vsi, cmd); default: break; } @@ -3144,10 +3132,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs); break; - case ETHTOOL_GRXFH: - ice_get_rss_hash_opt(vsi, cmd); - ret = 0; - break; default: break; } @@ -4784,6 +4768,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_msglevel = ice_set_msglevel, .self_test = ice_self_test, .get_link = ethtool_op_get_link, + .get_link_ext_stats = ice_get_link_ext_stats, .get_eeprom_len = ice_get_eeprom_len, .get_eeprom = ice_get_eeprom, .get_coalesce = ice_get_coalesce, @@ -4806,6 +4791,8 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, + .get_rxfh_fields = ice_get_rxfh_fields, + .set_rxfh_fields = ice_set_rxfh_fields, .get_channels = ice_get_channels, .set_channels = ice_set_channels, .get_ts_info = ice_get_ts_info, diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index d97b751052f2..278e57686274 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -2573,38 +2573,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, * convert its values to their appropriate flow L3, L4 values. 
*/ #define ICE_FLOW_AVF_RSS_IPV4_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4)) #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP)) #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP)) #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \ - ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) + ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) #define ICE_FLOW_AVF_RSS_IPV6_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6)) #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP)) #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \ - (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP)) + (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP)) #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \ - ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) + ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure * @vsi: VF's VSI - * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure + * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow @@ -2621,8 +2621,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash) return -EINVAL; vsi_handle = vsi->idx; - if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle)) return -EINVAL; /* Make sure no unsupported bits are specified */ @@ -2658,11 +2657,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash) ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS; } else if (hash_flds & - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) { + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= - ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP); + ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP); } } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) { if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) { @@ -2679,11 +2678,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 
avf_hash) ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS; } else if (hash_flds & - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) { + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= - ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP); + ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP); } } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 6cb7bb879c98..52f906d89eca 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -4,6 +4,8 @@ #ifndef _ICE_FLOW_H_ #define _ICE_FLOW_H_ +#include <linux/net/intel/libie/pctype.h> + #include "ice_flex_type.h" #include "ice_parser.h" @@ -264,57 +266,27 @@ enum ice_flow_field { #define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID) -/* Flow headers and fields for AVF support */ -enum ice_flow_avf_hdr_field { - /* Values 0 - 28 are reserved for future use */ - ICE_AVF_FLOW_FIELD_INVALID = 0, - ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29, - ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP, - ICE_AVF_FLOW_FIELD_IPV4_UDP, - ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK, - ICE_AVF_FLOW_FIELD_IPV4_TCP, - ICE_AVF_FLOW_FIELD_IPV4_SCTP, - ICE_AVF_FLOW_FIELD_IPV4_OTHER, - ICE_AVF_FLOW_FIELD_FRAG_IPV4, - /* Values 37-38 are reserved */ - ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39, - ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP, - ICE_AVF_FLOW_FIELD_IPV6_UDP, - ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK, - ICE_AVF_FLOW_FIELD_IPV6_TCP, - ICE_AVF_FLOW_FIELD_IPV6_SCTP, - ICE_AVF_FLOW_FIELD_IPV6_OTHER, - ICE_AVF_FLOW_FIELD_FRAG_IPV6, - ICE_AVF_FLOW_FIELD_RSVD47, - ICE_AVF_FLOW_FIELD_FCOE_OX, - ICE_AVF_FLOW_FIELD_FCOE_RX, - ICE_AVF_FLOW_FIELD_FCOE_OTHER, - /* Values 51-62 are reserved */ - ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63, - ICE_AVF_FLOW_FIELD_MAX -}; - /* Supported RSS offloads This macro is defined to support - * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware + * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware * capabilities to the caller of this ops. 
*/ -#define ICE_DEFAULT_RSS_HENA ( \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ - BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) +#define ICE_DEFAULT_RSS_HASHCFG ( \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) enum ice_rss_cfg_hdr_type { ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 03bb16191237..2f1782e9357f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -484,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) if (!q_vector->tx.tx_ring) return IRQ_HANDLED; -#define FDIR_RX_DESC_CLEAN_BUDGET 64 - ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); + ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring); ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); return IRQ_HANDLED; @@ -1579,7 +1578,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) return; } - status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG); if (status) dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", vsi->vsi_num, status); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0a11b4281092..f8ef80069e3d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1144,6 +1144,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return 0; + if (!link_up && old_link) + pf->link_down_events++; + ice_ptp_link_change(pf, link_up); if (ice_is_dcb_active(pf)) { @@ -4764,7 +4767,6 @@ int ice_init_dev(struct ice_pf *pf) pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; - pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 
pf->hw.udp_tunnel_nic.tables[0].n_entries = @@ -7933,6 +7935,10 @@ const char *ice_aq_str(enum ice_aq_err aq_err) return "ICE_AQ_RC_EPERM"; case ICE_AQ_RC_ENOENT: return "ICE_AQ_RC_ENOENT"; + case ICE_AQ_RC_ESRCH: + return "ICE_AQ_RC_ESRCH"; + case ICE_AQ_RC_EAGAIN: + return "ICE_AQ_RC_EAGAIN"; case ICE_AQ_RC_ENOMEM: return "ICE_AQ_RC_ENOMEM"; case ICE_AQ_RC_EBUSY: diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 55cad824c5b9..9a2606dcdf48 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -40,21 +40,19 @@ static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { { ONE_PPS, { -1, 5 }, { 0, 1 }}, }; -static const char ice_pin_names_nvm[][64] = { - "GNSS", - "SMA1", - "U.FL1", - "SMA2", - "U.FL2", +static const char ice_pin_names_dpll[][64] = { + "SDP20", + "SDP21", + "SDP22", + "SDP23", }; -static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { +static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = { /* name, gpio, delay */ - { GNSS, { 1, -1 }, { 0, 0 }}, - { SMA1, { 1, 0 }, { 0, 1 }}, - { UFL1, { -1, 0 }, { 0, 1 }}, - { SMA2, { 3, 2 }, { 0, 1 }}, - { UFL2, { 3, -1 }, { 0, 0 }}, + { SDP0, { -1, 0 }, { 0, 1 }}, + { SDP1, { 1, -1 }, { 0, 0 }}, + { SDP2, { -1, 2 }, { 0, 1 }}, + { SDP3, { 3, -1 }, { 0, 0 }}, }; static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) @@ -93,101 +91,6 @@ static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func, } /** - * ice_ptp_update_sma_data - update SMA pins data according to pins setup - * @pf: Board private structure - * @sma_pins: parsed SMA pins status - * @data: SMA data to update - */ -static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[], - u8 *data) -{ - const char *state1, *state2; - - /* Set the right state based on the desired configuration. - * When bit is set, functionality is disabled. 
- */ - *data &= ~ICE_ALL_SMA_MASK; - if (!sma_pins[UFL1 - 1]) { - if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) { - state1 = "SMA1 Rx, U.FL1 disabled"; - *data |= ICE_SMA1_TX_EN; - } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) { - state1 = "SMA1 Tx U.FL1 disabled"; - *data |= ICE_SMA1_DIR_EN; - } else { - state1 = "SMA1 disabled, U.FL1 disabled"; - *data |= ICE_SMA1_MASK; - } - } else { - /* U.FL1 Tx will always enable SMA1 Rx */ - state1 = "SMA1 Rx, U.FL1 Tx"; - } - - if (!sma_pins[UFL2 - 1]) { - if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) { - state2 = "SMA2 Rx, U.FL2 disabled"; - *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; - } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) { - state2 = "SMA2 Tx, U.FL2 disabled"; - *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS; - } else { - state2 = "SMA2 disabled, U.FL2 disabled"; - *data |= ICE_SMA2_MASK; - } - } else { - if (!sma_pins[SMA2 - 1]) { - state2 = "SMA2 disabled, U.FL2 Rx"; - *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN; - } else { - state2 = "SMA2 Tx, U.FL2 Rx"; - *data |= ICE_SMA2_DIR_EN; - } - } - - dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2); -} - -/** - * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic - * @pf: Board private structure - * - * Return: 0 on success, negative error code otherwise - */ -static int ice_ptp_set_sma_cfg(struct ice_pf *pf) -{ - const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc; - struct ptp_pin_desc *pins = pf->ptp.pin_desc; - unsigned int sma_pins[ICE_SMA_PINS_NUM] = {}; - int err; - u8 data; - - /* Read initial pin state value */ - err = ice_read_sma_ctrl(&pf->hw, &data); - if (err) - return err; - - /* Get SMA/U.FL pins states */ - for (int i = 0; i < pf->ptp.info.n_pins; i++) - if (pins[i].func) { - int name_idx = ice_pins[i].name_idx; - - switch (name_idx) { - case SMA1: - case UFL1: - case SMA2: - case UFL2: - sma_pins[name_idx - 1] = pins[i].func; - break; - default: - continue; - } - } - - ice_ptp_update_sma_data(pf, sma_pins, &data); - return ice_write_sma_ctrl(&pf->hw, data); -} - -/** * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device * @pf: Board private structure * @@ -1879,63 +1782,6 @@ static void ice_ptp_enable_all_perout(struct ice_pf *pf) } /** - * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO - * @pf: Board private structure - * @pin: Pin index - * @func: Assigned function - * - * Return: 0 on success, negative error code otherwise - */ -static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin, - enum ptp_pin_function func) -{ - unsigned int gpio_pin; - - switch (func) { - case PTP_PF_PEROUT: - gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1]; - break; - case PTP_PF_EXTTS: - gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0]; - break; - default: - return -EOPNOTSUPP; - } - - for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { - struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i]; - unsigned int chan = pin_desc->chan; - - /* Skip pin idx from the request */ - if (i == pin) - continue; - - if (pin_desc->func == PTP_PF_PEROUT && - pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) { - pf->ptp.perout_rqs[chan].period.sec = 0; - pf->ptp.perout_rqs[chan].period.nsec = 0; - pin_desc->func = PTP_PF_NONE; - pin_desc->chan = 0; - dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n", - i, gpio_pin); - return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan], - false); - } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS && - pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) { - 
pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE; - pin_desc->func = PTP_PF_NONE; - pin_desc->chan = 0; - dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n", - i, gpio_pin); - return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan], - false); - } - } - - return 0; -} - -/** * ice_verify_pin - verify if pin supports requested pin function * @info: the driver's PTP info structure * @pin: Pin index @@ -1969,14 +1815,6 @@ static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, return -EOPNOTSUPP; } - /* On adapters with SMA_CTRL disable other pins that share same GPIO */ - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { - ice_ptp_disable_shared_pin(pf, pin, func); - pf->ptp.pin_desc[pin].func = func; - pf->ptp.pin_desc[pin].chan = chan; - return ice_ptp_set_sma_cfg(pf); - } - return 0; } @@ -2500,14 +2338,14 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; - const char *name = NULL; + const char *name; if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) name = ice_pin_names[desc->name_idx]; - else if (desc->name_idx != GPIO_NA) - name = ice_pin_names_nvm[desc->name_idx]; - if (name) - strscpy(pin->name, name, sizeof(pin->name)); + else + name = ice_pin_names_dpll[desc->name_idx]; + + strscpy(pin->name, name, sizeof(pin->name)); pin->index = i; } @@ -2519,8 +2357,8 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) * ice_ptp_disable_pins - Disable PTP pins * @pf: pointer to the PF structure * - * Disable the OS access to the SMA pins. Called to clear out the OS - * indications of pin support when we fail to setup the SMA control register. + * Disable the OS access to the pins. Called to clear out the OS + * indications of pin support when we fail to setup pin array. */ static void ice_ptp_disable_pins(struct ice_pf *pf) { @@ -2561,40 +2399,30 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, for (i = 0; i < num_entries; i++) { u16 entry = le16_to_cpu(entries[i]); DECLARE_BITMAP(bitmap, GPIO_NA); - unsigned int bitmap_idx; + unsigned int idx; bool dir; u16 gpio; *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry); + + /* Check if entry's pin bitmap is valid. 
*/ + if (bitmap_empty(bitmap, GPIO_NA)) + continue; + dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry); gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry); - for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) { - unsigned int idx; - - /* Check if entry's pin bit is valid */ - if (bitmap_idx >= NUM_PTP_PINS_NVM && - bitmap_idx != GPIO_NA) - continue; - /* Check if pin already exists */ - for (idx = 0; idx < ICE_N_PINS_MAX; idx++) - if (pins[idx].name_idx == bitmap_idx) - break; - - if (idx == ICE_N_PINS_MAX) { - /* Pin not found, setup its entry and name */ - idx = n_pins++; - pins[idx].name_idx = bitmap_idx; - if (bitmap_idx == GPIO_NA) - strscpy(pf->ptp.pin_desc[idx].name, - ice_pin_names[gpio], - sizeof(pf->ptp.pin_desc[idx] - .name)); - } + for (idx = 0; idx < ICE_N_PINS_MAX; idx++) { + if (pins[idx].name_idx == gpio) + break; + } - /* Setup in/out GPIO number */ - pins[idx].gpio[dir] = gpio; + if (idx == ICE_N_PINS_MAX) { + /* Pin not found, setup its entry and name */ + idx = n_pins++; + pins[idx].name_idx = gpio; } + pins[idx].gpio[dir] = gpio; } for (i = 0; i < n_pins; i++) { @@ -2622,10 +2450,10 @@ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) { pf->ptp.ice_pin_desc = ice_pin_desc_e825c; - pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); + pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c); } else { pf->ptp.ice_pin_desc = ice_pin_desc_e82x; - pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x); + pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x); } ice_ptp_setup_pin_cfg(pf); } @@ -2651,15 +2479,13 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf) if (err) { /* SDP section does not exist in NVM or is corrupted */ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { - ptp->ice_pin_desc = ice_pin_desc_e810_sma; - ptp->info.n_pins = - ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); + ptp->ice_pin_desc = ice_pin_desc_dpll; + ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll); } else { pf->ptp.ice_pin_desc = ice_pin_desc_e810; - pf->ptp.info.n_pins = - ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); - err = 0; + pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810); } + err = 0; } else { desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, sizeof(struct ice_ptp_pin_desc), @@ -2677,8 +2503,6 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf) ptp->info.pin_config = ptp->pin_desc; ice_ptp_setup_pin_cfg(pf); - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) - err = ice_ptp_set_sma_cfg(pf); err: if (err) { devm_kfree(ice_pf_to_dev(pf), desc); @@ -2704,7 +2528,7 @@ static void ice_ptp_set_funcs_e830(struct ice_pf *pf) #endif /* CONFIG_ICE_HWTS */ /* Rest of the config is the same as base E810 */ pf->ptp.ice_pin_desc = ice_pin_desc_e810; - pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810); ice_ptp_setup_pin_cfg(pf); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 3b769a0cad00..c8dac5a5bcd9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -202,9 +202,6 @@ enum ice_ptp_pin_nvm { /* Pin definitions for PTP */ #define ICE_N_PINS_MAX 6 -#define ICE_SMA_PINS_NUM 4 -#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \ - sizeof(struct ice_ptp_pin_desc)) /** * struct ice_ptp_pin_desc - hardware pin description data diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 
83f20fa7ace7..657ca1b3bf70 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -704,6 +704,7 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw) #define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN) #define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \ ICE_SMA2_TX_EN) +#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN) #define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK) #define ICE_SMA_MIN_BIT 3 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 0e5107fe62ad..29e0088ab6b2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -20,7 +20,6 @@ #define ICE_RX_HDR_SIZE 256 -#define FDIR_DESC_RXDID 0x40 #define ICE_FDIR_CLEAN_DELAY 10 /** @@ -707,6 +706,37 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) } /** + * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi. + * @rx_ring: ring to init descriptors on + * @count: number of descriptors to initialize + */ +void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count) +{ + union ice_32b_rx_flex_desc *rx_desc; + u32 ntu = rx_ring->next_to_use; + + if (!count) + return; + + rx_desc = ICE_RX_DESC(rx_ring, ntu); + + do { + rx_desc++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = ICE_RX_DESC(rx_ring, 0); + ntu = 0; + } + + rx_desc->wb.status_error0 = 0; + count--; + } while (count); + + if (rx_ring->next_to_use != ntu) + ice_release_rx_desc(rx_ring, ntu); +} + +/** * ice_alloc_rx_bufs - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace @@ -726,8 +756,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) struct ice_rx_buf *bi; /* do nothing if no valid netdev defined */ - if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || - !cleaned_count) + if (!rx_ring->netdev || !cleaned_count) return false; /* get the Rx descriptor and buffer based on next_to_use */ @@ -1184,6 +1213,45 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, } /** + * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring + * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on + * + * This function cleans Rx descriptors from the ctrl_vsi Rx ring used + * to set flow director rules on VFs. 
+ */ +void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean; + unsigned int total_rx_pkts = 0; + u32 cnt = rx_ring->count; + + while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) { + struct ice_vsi *ctrl_vsi = rx_ring->vsi; + union ice_32b_rx_flex_desc *rx_desc; + u16 stat_err_bits; + + rx_desc = ICE_RX_DESC(rx_ring, ntc); + + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); + if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) + break; + + dma_rmb(); + + if (ctrl_vsi->vf) + ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); + + if (++ntc == cnt) + ntc = 0; + total_rx_pkts++; + } + + rx_ring->first_desc = ntc; + rx_ring->next_to_clean = ntc; + ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); +} + +/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -1195,7 +1263,7 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, * * Returns amount of work completed */ -int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) +static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; @@ -1242,17 +1310,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) dma_rmb(); ice_trace(clean_rx_irq, rx_ring, rx_desc); - if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { - struct ice_vsi *ctrl_vsi = rx_ring->vsi; - - if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && - ctrl_vsi->vf) - ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); - if (++ntc == cnt) - ntc = 0; - rx_ring->first_desc = ntc; - continue; - } size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index a4b1e9514632..fef750c5f288 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -491,6 +491,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) union ice_32b_rx_flex_desc; +void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs); bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); u16 @@ -506,6 +507,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget); int ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, u8 *raw_packet); -int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget); void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring); +void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring); #endif /* _ICE_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index eeeb9968e477..24426dcd8aa2 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2999,13 +2999,13 @@ error_param: } /** - * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware + * ice_vc_get_rss_hashcfg - return the RSS Hash configuration * @vf: pointer to the VF info */ -static int ice_vc_get_rss_hena(struct ice_vf *vf) +static int ice_vc_get_rss_hashcfg(struct ice_vf *vf) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct virtchnl_rss_hena *vrh = NULL; + struct virtchnl_rss_hashcfg *vrh = NULL; int len = 0, ret; if (!test_bit(ICE_VF_STATE_ACTIVE, 
vf->vf_states)) { @@ -3019,7 +3019,7 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf) goto err; } - len = sizeof(struct virtchnl_rss_hena); + len = sizeof(struct virtchnl_rss_hashcfg); vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; @@ -3027,23 +3027,23 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf) goto err; } - vrh->hena = ICE_DEFAULT_RSS_HENA; + vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG; err: /* send the response back to the VF */ - ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret, + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret, (u8 *)vrh, len); kfree(vrh); return ret; } /** - * ice_vc_set_rss_hena - set RSS HENA bits for the VF + * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF * @vf: pointer to the VF info * @msg: pointer to the msg buffer */ -static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) +static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg) { - struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; + struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; @@ -3074,9 +3074,9 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) * disable RSS */ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); - if (status && !vrh->hena) { + if (status && !vrh->hashcfg) { /* only report failure to clear the current RSS configuration if - * that was clearly the VF's intention (i.e. vrh->hena = 0) + * that was clearly the VF's intention (i.e. vrh->hashcfg = 0) */ v_ret = ice_err_to_virt_err(status); goto err; @@ -3089,14 +3089,14 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) vf->vf_id); } - if (vrh->hena) { - status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena); + if (vrh->hashcfg) { + status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg); v_ret = ice_err_to_virt_err(status); } /* send the response to the VF */ err: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret, NULL, 0); } @@ -4243,8 +4243,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .add_vlan_msg = ice_vc_add_vlan_msg, .remove_vlan_msg = ice_vc_remove_vlan_msg, .query_rxdid = ice_vc_query_rxdid, - .get_rss_hena = ice_vc_get_rss_hena, - .set_rss_hena_msg = ice_vc_set_rss_hena, + .get_rss_hashcfg = ice_vc_get_rss_hashcfg, + .set_rss_hashcfg = ice_vc_set_rss_hashcfg, .ena_vlan_stripping = ice_vc_ena_vlan_stripping, .dis_vlan_stripping = ice_vc_dis_vlan_stripping, .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, @@ -4380,8 +4380,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .add_vlan_msg = ice_vc_add_vlan_msg, .remove_vlan_msg = ice_vc_remove_vlan_msg, .query_rxdid = ice_vc_query_rxdid, - .get_rss_hena = ice_vc_get_rss_hena, - .set_rss_hena_msg = ice_vc_set_rss_hena, + .get_rss_hashcfg = ice_vc_get_rss_hashcfg, + .set_rss_hashcfg = ice_vc_set_rss_hashcfg, .ena_vlan_stripping = ice_vc_ena_vlan_stripping, .dis_vlan_stripping = ice_vc_dis_vlan_stripping, .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, @@ -4582,11 +4582,11 @@ error_handler: case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: err = ops->query_rxdid(vf); break; - case VIRTCHNL_OP_GET_RSS_HENA_CAPS: - err = ops->get_rss_hena(vf); + case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: + err = ops->get_rss_hashcfg(vf); break; - case VIRTCHNL_OP_SET_RSS_HENA: - err = ops->set_rss_hena_msg(vf, msg); + case 
VIRTCHNL_OP_SET_RSS_HASHCFG: + err = ops->set_rss_hashcfg(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: err = ops->ena_vlan_stripping(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index 222990f229d5..b3eece8c6780 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -57,8 +57,8 @@ struct ice_virtchnl_ops { int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); int (*query_rxdid)(struct ice_vf *vf); - int (*get_rss_hena)(struct ice_vf *vf); - int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg); + int (*get_rss_hashcfg)(struct ice_vf *vf); + int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg); int (*ena_vlan_stripping)(struct ice_vf *vf); int (*dis_vlan_stripping)(struct ice_vf *vf); int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index a3d1579a619a..4c2ec2337b38 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = { /* VIRTCHNL_VF_OFFLOAD_RSS_PF */ static const u32 rss_pf_allowlist_opcodes[] = { VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT, - VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA, + VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG, VIRTCHNL_OP_CONFIG_RSS_HFUNC, }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 993c354aa27a..555879b1248d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1006,7 +1006,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 5cf440e09d0a..cef9dfb877e8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -383,12 +383,12 @@ err_out: */ static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { - if (unlikely(!rx_buf->page)) + if (unlikely(!rx_buf->netmem)) return; - page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); + libeth_rx_recycle_slow(rx_buf->netmem); - rx_buf->page = NULL; + rx_buf->netmem = 0; rx_buf->offset = 0; } @@ -3240,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { - u32 hr = rx_buf->page->pp->p.offset; + u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->offset + hr, size, rx_buf->truesize); + skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem, + rx_buf->offset + hr, size, rx_buf->truesize); } /** @@ -3266,16 +3266,20 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, struct libeth_fqe *buf, u32 data_len) { u32 copy = data_len <= L1_CACHE_BYTES ? 
data_len : ETH_HLEN; + struct page *hdr_page, *buf_page; const void *src; void *dst; - if (!libeth_rx_sync_for_cpu(buf, copy)) + if (unlikely(netmem_is_net_iov(buf->netmem)) || + !libeth_rx_sync_for_cpu(buf, copy)) return 0; - dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; - src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; - memcpy(dst, src, LARGEST_ALIGN(copy)); + hdr_page = __netmem_to_page(hdr->netmem); + buf_page = __netmem_to_page(buf->netmem); + dst = page_address(hdr_page) + hdr->offset + hdr_page->pp->p.offset; + src = page_address(buf_page) + buf->offset + buf_page->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); buf->offset += copy; return copy; @@ -3291,11 +3295,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, */ struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { - u32 hr = buf->page->pp->p.offset; + struct page *buf_page = __netmem_to_page(buf->netmem); + u32 hr = buf_page->pp->p.offset; struct sk_buff *skb; void *va; - va = page_address(buf->page) + buf->offset; + va = page_address(buf_page) + buf->offset; prefetch(va + hr); skb = napi_build_skb(va, buf->truesize); @@ -3429,7 +3434,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) if (unlikely(!hdr_len && !skb)) { hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); - pkt_len -= hdr_len; + /* If failed, drop both buffers by setting len to 0 */ + pkt_len -= hdr_len ? : pkt_len; u64_stats_update_begin(&rxq->stats_sync); u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); @@ -3446,7 +3452,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) u64_stats_update_end(&rxq->stats_sync); } - hdr->page = NULL; + hdr->netmem = 0; payload: if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) @@ -3462,7 +3468,7 @@ payload: break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index ca6ccbc13954..92ef33459aec 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2500,9 +2500,11 @@ static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, return 0; } -static int igb_get_rss_hash_opts(struct igb_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igb_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct igb_adapter *adapter = netdev_priv(dev); + cmd->data = 0; /* Report default options for RSS on igb */ @@ -2563,9 +2565,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); break; - case ETHTOOL_GRXFH: - ret = igb_get_rss_hash_opts(adapter, cmd); - break; default: break; } @@ -2575,9 +2574,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, #define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ IGB_FLAG_RSS_FIELD_IPV6_UDP) -static int igb_set_rss_hash_opt(struct igb_adapter *adapter, - struct ethtool_rxnfc *nfc) +static int igb_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct igb_adapter *adapter = netdev_priv(dev); u32 flags = adapter->flags; /* RSS does not support anything other than hashing @@ -3005,9 +3006,6 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) int ret = -EOPNOTSUPP; switch 
(cmd->cmd) { - case ETHTOOL_SRXFH: - ret = igb_set_rss_hash_opt(adapter, cmd); - break; case ETHTOOL_SRXCLSRLINS: ret = igb_add_ethtool_nfc_entry(adapter, cmd); break; @@ -3485,6 +3483,8 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_rxfh_indir_size = igb_get_rxfh_indir_size, .get_rxfh = igb_get_rxfh, .set_rxfh = igb_set_rxfh, + .get_rxfh_fields = igb_get_rxfh_fields, + .set_rxfh_fields = igb_set_rxfh_fields, .get_channels = igb_get_channels, .set_channels = igb_set_channels, .get_priv_flags = igb_get_priv_flags, diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 859a15e4ccba..1525ae25fd3e 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -43,6 +43,7 @@ void igc_ethtool_set_ops(struct net_device *); struct igc_fpe_t { struct ethtool_mmsv mmsv; u32 tx_min_frag_size; + bool tx_enabled; }; enum igc_mac_filter_type { @@ -163,6 +164,7 @@ struct igc_ring { bool launchtime_enable; /* true if LaunchTime is enabled */ ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + bool preemptible; /* True if preemptible queue, false if express queue */ u32 start_time; u32 end_time; @@ -395,6 +397,7 @@ extern char igc_driver_name[]; #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) #define IGC_FLAG_TSN_QAV_ENABLED BIT(18) #define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19) +#define IGC_FLAG_TSN_REVERSE_TXQ_PRIO BIT(20) #define IGC_FLAG_TSN_ANY_ENABLED \ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \ @@ -485,12 +488,30 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) * descriptors until either it has this many to write back, or the * ITR timer expires. */ -#define IGC_RX_PTHRESH 8 -#define IGC_RX_HTHRESH 8 -#define IGC_TX_PTHRESH 8 -#define IGC_TX_HTHRESH 1 -#define IGC_RX_WTHRESH 4 -#define IGC_TX_WTHRESH 16 +#define IGC_RXDCTL_PTHRESH 8 +#define IGC_RXDCTL_HTHRESH 8 +#define IGC_RXDCTL_WTHRESH 4 +/* Ena specific Rx Queue */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 +/* Receive Software Flush */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 + +#define IGC_TXDCTL_PTHRESH_MASK GENMASK(4, 0) +#define IGC_TXDCTL_HTHRESH_MASK GENMASK(12, 8) +#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16) +#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25) +#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26) +#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27) + +#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x)) +#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x)) +#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x)) +/* Ena specific Tx Queue */ +#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1) +/* Transmit Software Flush */ +#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1) +#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x)) +#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1) #define IGC_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 6320eabb72fe..eaf17cd031c3 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -86,14 +86,6 @@ union igc_adv_rx_desc { } wb; /* writeback */ }; -/* Additional Transmit Descriptor Control definitions */ -#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ -#define IGC_TXDCTL_SWFLUSH 
0x04000000 /* Transmit Software Flush */ - -/* Additional Receive Descriptor Control definitions */ -#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ -#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ - /* SRRCTL bit definitions */ #define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) #define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 7189dfc389ad..86b346687196 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -588,6 +588,7 @@ #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 #define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_PREEMPTIBLE 0x00000008 #define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 #define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 #define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 3fc1eded9605..a7f397b58cd6 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -122,9 +122,11 @@ static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { #define IGC_STATS_LEN \ (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) +#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1) static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", + "reverse-tsn-txq-prio", }; #define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) @@ -1045,9 +1047,11 @@ static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, return 0; } -static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int igc_ethtool_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct igc_adapter *adapter = netdev_priv(dev); + cmd->data = 0; /* Report default options for RSS on igc */ @@ -1103,8 +1107,6 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev, return igc_ethtool_get_nfc_rule(adapter, cmd); case ETHTOOL_GRXCLSRLALL: return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); - case ETHTOOL_GRXFH: - return igc_ethtool_get_rss_hash_opts(adapter, cmd); default: return -EOPNOTSUPP; } @@ -1112,9 +1114,11 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev, #define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ IGC_FLAG_RSS_FIELD_IPV6_UDP) -static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, - struct ethtool_rxnfc *nfc) +static int igc_ethtool_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct igc_adapter *adapter = netdev_priv(dev); u32 flags = adapter->flags; /* RSS does not support anything other than hashing @@ -1425,8 +1429,6 @@ static int igc_ethtool_set_rxnfc(struct net_device *dev, struct igc_adapter *adapter = netdev_priv(dev); switch (cmd->cmd) { - case ETHTOOL_SRXFH: - return igc_ethtool_set_rss_hash_opt(adapter, cmd); case ETHTOOL_SRXCLSRLINS: return igc_ethtool_add_nfc_rule(adapter, cmd); case ETHTOOL_SRXCLSRLDEL: @@ -1600,6 +1602,9 @@ static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) if (adapter->flags & IGC_FLAG_RX_LEGACY) priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO) + priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO; + return 
priv_flags; } @@ -1608,10 +1613,13 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) struct igc_adapter *adapter = netdev_priv(netdev); unsigned int flags = adapter->flags; - flags &= ~IGC_FLAG_RX_LEGACY; + flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO); if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) flags |= IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO) + flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO; + if (flags != adapter->flags) { adapter->flags = flags; @@ -2144,6 +2152,8 @@ static const struct ethtool_ops igc_ethtool_ops = { .get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size, .get_rxfh = igc_ethtool_get_rxfh, .set_rxfh = igc_ethtool_set_rxfh, + .get_rxfh_fields = igc_ethtool_get_rxfh_fields, + .set_rxfh_fields = igc_ethtool_set_rxfh_fields, .get_ts_info = igc_ethtool_get_ts_info, .get_channels = igc_ethtool_get_channels, .set_channels = igc_ethtool_set_channels, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 686793c539f2..2e12915b42a9 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -683,9 +683,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter, wr32(IGC_SRRCTL(reg_idx), srrctl); - rxdctl |= IGC_RX_PTHRESH; - rxdctl |= IGC_RX_HTHRESH << 8; - rxdctl |= IGC_RX_WTHRESH << 16; + rxdctl |= IGC_RXDCTL_PTHRESH; + rxdctl |= IGC_RXDCTL_HTHRESH << 8; + rxdctl |= IGC_RXDCTL_WTHRESH << 16; /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, @@ -749,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter, wr32(IGC_TDH(reg_idx), 0); writel(0, ring->tail); - txdctl |= IGC_TX_PTHRESH; - txdctl |= IGC_TX_HTHRESH << 8; - txdctl |= IGC_TX_WTHRESH << 16; + txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) | + IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE; - txdctl |= IGC_TXDCTL_QUEUE_ENABLE; wr32(IGC_TXDCTL(reg_idx), txdctl); } @@ -1687,6 +1685,15 @@ done: first->tx_flags = tx_flags; first->protocol = protocol; + /* For preemptible queue, manually pad the skb so that HW includes + * padding bytes in mCRC calculation + */ + if (tx_ring->preemptible && skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out_drop; + skb_put(skb, ETH_ZLEN - skb->len); + } + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); if (tso < 0) goto out_drop; @@ -6423,6 +6430,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter) ring->start_time = 0; ring->end_time = NSEC_PER_SEC; ring->max_sdu = 0; + ring->preemptible = false; } spin_lock_irqsave(&adapter->qbv_tx_lock, flags); @@ -6488,9 +6496,12 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, if (!validate_schedule(adapter, qopt)) return -EINVAL; - /* preemptible isn't supported yet */ - if (qopt->mqprio.preemptible_tcs) - return -EOPNOTSUPP; + if (qopt->mqprio.preemptible_tcs && + !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) { + NL_SET_ERR_MSG_MOD(qopt->extack, + "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc"); + return -ENODEV; + } igc_ptp_read(adapter, &now); @@ -6583,6 +6594,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, ring->max_sdu = 0; } + igc_fpe_save_preempt_queue(adapter, &qopt->mqprio); + return 0; } @@ -6702,7 +6715,8 @@ static int igc_tc_query_caps(struct igc_adapter *adapter, case TC_SETUP_QDISC_TAPRIO: { struct tc_taprio_caps *caps = base->caps; - caps->broken_mqprio = true; + if (!(adapter->flags & 
IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) + caps->broken_mqprio = true; if (hw->mac.type == igc_i225) { caps->supports_queue_max_sdu = true; @@ -6728,6 +6742,20 @@ static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc, adapter->queue_per_tc[i] = offset[i]; } +static bool +igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio) +{ + int num_tc = mqprio->qopt.num_tc; + int i; + + for (i = 1; i < num_tc; i++) { + if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i]) + return false; + } + + return true; +} + static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio) { @@ -6739,6 +6767,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, if (!mqprio->qopt.num_tc) { adapter->strict_priority_enable = false; + igc_fpe_clear_preempt_queue(adapter); netdev_reset_tc(adapter->netdev); goto apply; } @@ -6760,10 +6789,9 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, } } - /* Preemption is not supported yet. */ - if (mqprio->preemptible_tcs) { + if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) { NL_SET_ERR_MSG_MOD(mqprio->extack, - "Preemption is not supported yet"); + "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)"); return -EOPNOTSUPP; } @@ -6786,6 +6814,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, adapter->queue_per_tc[i] = i; mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + igc_fpe_save_preempt_queue(adapter, mqprio); apply: return igc_tsn_offload_apply(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index f22cc4d4f459..8a110145bfee 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -13,6 +13,13 @@ #define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \ (MAX_MULTPLIER_TX_MIN_FRAG + 1)) +enum tx_queue { + TX_QUEUE_0 = 0, + TX_QUEUE_1, + TX_QUEUE_2, + TX_QUEUE_3, +}; + DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled); static int igc_fpe_init_smd_frame(struct igc_ring *ring, @@ -109,6 +116,18 @@ static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter, return err; } +static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable) +{ + struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv); + struct igc_adapter *adapter; + + adapter = container_of(fpe, struct igc_adapter, fpe); + adapter->fpe.tx_enabled = tx_enable; + + /* Update config since tx_enabled affects preemptible queue configuration */ + igc_tsn_offload_apply(adapter); +} + static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv, enum ethtool_mpacket type) { @@ -130,15 +149,59 @@ static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv, } static const struct ethtool_mmsv_ops igc_mmsv_ops = { + .configure_tx = igc_fpe_configure_tx, .send_mpacket = igc_fpe_send_mpacket, }; void igc_fpe_init(struct igc_adapter *adapter) { adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE; + adapter->fpe.tx_enabled = false; ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops); } +void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter) +{ + for (int i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + tx_ring->preemptible = false; + } +} + +static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter, + unsigned long preemptible_tcs) +{ + struct net_device *dev = adapter->netdev; + u32 i, queue = 0; + + for (i = 0; i < dev->num_tc; i++) { + u32 offset, count; + + if (!(preemptible_tcs & 
BIT(i))) + continue; + + offset = dev->tc_to_txq[i].offset; + count = dev->tc_to_txq[i].count; + queue |= GENMASK(offset + count - 1, offset); + } + + return queue; +} + +void igc_fpe_save_preempt_queue(struct igc_adapter *adapter, + const struct tc_mqprio_qopt_offload *mqprio) +{ + u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter, + mqprio->preemptible_tcs); + + for (int i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + tx_ring->preemptible = !!(preemptible_queue & BIT(i)); + } +} + static bool is_any_launchtime(struct igc_adapter *adapter) { int i; @@ -238,7 +301,7 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter) adapter->taprio_offload_enable; } -static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc) +static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio) { struct igc_hw *hw = &adapter->hw; u32 txarb; @@ -250,10 +313,17 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc) IGC_TXARB_TXQ_PRIO_2_MASK | IGC_TXARB_TXQ_PRIO_3_MASK); - txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]); - txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]); - txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]); - txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]); + if (reverse_prio) { + txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3); + txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2); + txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1); + txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0); + } else { + txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0); + txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1); + txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2); + txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3); + } wr32(IGC_TXARB, txarb); } @@ -286,7 +356,6 @@ static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter, */ static int igc_tsn_disable_offload(struct igc_adapter *adapter) { - u16 queue_per_tc[4] = { 3, 2, 1, 0 }; struct igc_hw *hw = &adapter->hw; u32 tqavctrl; int i; @@ -308,9 +377,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCTRL, tqavctrl); for (i = 0; i < adapter->num_tx_queues; i++) { + int reg_idx = adapter->tx_ring[i]->reg_idx; + u32 txdctl; + wr32(IGC_TXQCTL(i), 0); wr32(IGC_STQT(i), 0); wr32(IGC_ENDQT(i), NSEC_PER_SEC); + + txdctl = rd32(IGC_TXDCTL(reg_idx)); + txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH; + wr32(IGC_TXDCTL(reg_idx), txdctl); } wr32(IGC_QBVCYCLET_S, 0); @@ -319,7 +395,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) /* Restore the default Tx arbitration: Priority 0 has the highest * priority and is assigned to queue 0 and so on and so forth. */ - igc_tsn_tx_arb(adapter, queue_per_tc); + igc_tsn_tx_arb(adapter, false); adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; @@ -355,7 +431,7 @@ static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe) u32 igc_fpe_get_supported_frag_size(u32 frag_size) { - const u32 supported_sizes[] = {64, 128, 192, 256}; + static const u32 supported_sizes[] = { 64, 128, 192, 256 }; /* Find the smallest supported size that is >= frag_size */ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) { @@ -385,15 +461,13 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (igc_is_device_id_i226(hw)) igc_tsn_set_retx_qbvfullthreshold(adapter); - if (adapter->strict_priority_enable) { - /* Configure queue priorities according to the user provided - * mapping. 
- */ - igc_tsn_tx_arb(adapter, adapter->queue_per_tc); - } + if (adapter->strict_priority_enable || + adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO) + igc_tsn_tx_arb(adapter, true); for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; + u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx)); u32 txqctl = 0; u16 cbs_value; u32 tqavcc; @@ -427,6 +501,22 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + if (!adapter->fpe.tx_enabled) { + /* fpe inactive: clear both flags */ + txqctl &= ~IGC_TXQCTL_PREEMPTIBLE; + txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH; + } else if (ring->preemptible) { + /* fpe active + preemptible: enable preemptible queue + set low priority */ + txqctl |= IGC_TXQCTL_PREEMPTIBLE; + txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH; + } else { + /* fpe active + express: enable express queue + set high priority */ + txqctl &= ~IGC_TXQCTL_PREEMPTIBLE; + txdctl |= IGC_TXDCTL_PRIORITY_HIGH; + } + + wr32(IGC_TXDCTL(ring->reg_idx), txdctl); + /* Skip configuring CBS for Q2 and Q3 */ if (i > 1) goto skip_cbs; diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index c2a77229207b..a95b893459d7 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -4,6 +4,8 @@ #ifndef _IGC_TSN_H_ #define _IGC_TSN_H_ +#include <net/pkt_sched.h> + #define IGC_RX_MIN_FRAG_SIZE 60 #define SMD_FRAME_SIZE 60 @@ -15,6 +17,9 @@ enum igc_txd_popts_type { DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled); void igc_fpe_init(struct igc_adapter *adapter); +void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter); +void igc_fpe_save_preempt_queue(struct igc_adapter *adapter, + const struct tc_mqprio_qopt_offload *mqprio); u32 igc_fpe_get_supported_frag_size(u32 frag_size); int igc_tsn_offload_apply(struct igc_adapter *adapter); int igc_tsn_reset(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 47311b134a7a..c6772cd2d802 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -752,6 +752,7 @@ struct ixgbe_adapter { bool link_up; unsigned long sfp_poll_time; unsigned long link_check_timeout; + u32 link_down_events; struct timer_list service_timer; struct work_struct service_task; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d8a919ab7027..25c3a09ad7f1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1033,6 +1033,14 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); } +static void ixgbe_get_link_ext_stats(struct net_device *netdev, + struct ethtool_link_ext_stats *stats) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + + stats->link_down_events = adapter->link_down_events; +} + static int ixgbe_get_eeprom_len(struct net_device *netdev) { struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); @@ -2745,9 +2753,11 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, return 0; } -static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) +static int ixgbe_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); + cmd->data = 0; /* Report default 
options for RSS on ixgbe */ @@ -2817,9 +2827,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); break; - case ETHTOOL_GRXFH: - ret = ixgbe_get_rss_hash_opts(adapter, cmd); - break; default: break; } @@ -3071,9 +3078,11 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) -static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *nfc) +static int ixgbe_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); u32 flags2 = adapter->flags2; /* @@ -3196,9 +3205,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLDEL: ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); break; - case ETHTOOL_SRXFH: - ret = ixgbe_set_rss_hash_opt(adapter, cmd); - break; default: break; } @@ -3719,6 +3725,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_wol = ixgbe_set_wol, .nway_reset = ixgbe_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ext_stats = ixgbe_get_link_ext_stats, .get_eeprom_len = ixgbe_get_eeprom_len, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, @@ -3742,6 +3749,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, + .get_rxfh_fields = ixgbe_get_rxfh_fields, + .set_rxfh_fields = ixgbe_set_rxfh_fields, .get_eee = ixgbe_get_eee, .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, @@ -3764,6 +3773,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = { .set_wol = ixgbe_set_wol_e610, .nway_reset = ixgbe_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ext_stats = ixgbe_get_link_ext_stats, .get_eeprom_len = ixgbe_get_eeprom_len, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, @@ -3787,6 +3797,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = { .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, + .get_rxfh_fields = ixgbe_get_rxfh_fields, + .set_rxfh_fields = ixgbe_set_rxfh_fields, .get_eee = ixgbe_get_eee, .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cba860f0e1f1..6eccfba51fac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7991,6 +7991,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (!netif_carrier_ok(netdev)) return; + adapter->link_down_events++; + /* poll for SFP+ cable when link is down */ if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 1d2acdb64f45..7461367a1868 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -20,7 +20,7 @@ static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) struct ixgbe_phy_info *phy = &hw->phy; struct ixgbe_link_info *link = &hw->link; - /* Start with X540 invariants, since so simular */ + /* Start with X540 invariants, since so similar */ 
ixgbe_get_invariants_X540(hw); if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) @@ -48,7 +48,7 @@ static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; - /* Start with X540 invariants, since so simular */ + /* Start with X540 invariants, since so similar */ ixgbe_get_invariants_X540(hw); if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) @@ -685,7 +685,7 @@ static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) return 0; } -/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the +/** ixgbe_read_iosf_sb_reg_x550 - Reads a value to specified register of the * IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write @@ -847,7 +847,7 @@ static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read + * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * @@ -1253,7 +1253,7 @@ static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) /** * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode - * @hw: pointer t hardware structure + * @hw: pointer to hardware structure * * Returns true if in FW NVM recovery mode. */ @@ -1267,7 +1267,7 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) /** ixgbe_disable_rx_x550 - Disable RX unit * - * Enables the Rx DMA unit for x550 + * Disables the Rx DMA unit for x550 **/ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) { @@ -1754,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. + * SFP not present error is not accepted in the setup MAC link flow. */ if (ret_val == -ENOENT) return 0; @@ -1804,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since - * SFP not present error is not excepted in the setup MAC link flow. + * SFP not present error is not accepted in the setup MAC link flow. */ if (ret_val == -ENOENT) return 0; @@ -2324,7 +2324,7 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, * PHY interrupt is lsc * @is_overtemp: indicate whether an overtemp event encountered * - * Determime if external Base T PHY interrupt cause is high temperature + * Determine if external Base T PHY interrupt cause is high temperature * failure alarm or link status change. **/ static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc, @@ -2669,7 +2669,7 @@ static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (status) return status; - /* If link is not still up, then no setup is necessary so return */ + /* If the link is still not up, no setup is necessary */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; @@ -2768,7 +2768,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * Sends driver version number to firmware through the manageability * block. 
On success return 0 * else returns -EBUSY when encountering an error acquiring - * semaphore, -EIO when command fails or -ENIVAL when incorrect + * semaphore, -EIO when command fails or -EINVAL when incorrect * params passed. **/ int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, @@ -3175,7 +3175,7 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set - * PHY address. This register field was has only been used for X552. + * PHY address. This register field has only been used for X552. */ if (hw->mac.type == ixgbe_mac_x550em_a && hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { @@ -3735,7 +3735,7 @@ static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * - * Release the SWFW semaphore and puts the shared PHY token as needed + * Release the SWFW semaphore and puts back the shared PHY token as needed */ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) { @@ -3756,7 +3756,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register using the SWFW lock and PHY - * Token. The PHY Token is needed since the MDIO is shared between to MAC + * Token. The PHY Token is needed since the MDIO is shared between two MAC * instances. */ static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig index 480293b71dbc..2445b979c499 100644 --- a/drivers/net/ethernet/intel/libeth/Kconfig +++ b/drivers/net/ethernet/intel/libeth/Kconfig @@ -1,9 +1,15 @@ # SPDX-License-Identifier: GPL-2.0-only -# Copyright (C) 2024 Intel Corporation +# Copyright (C) 2024-2025 Intel Corporation config LIBETH - tristate + tristate "Common Ethernet library (libeth)" if COMPILE_TEST select PAGE_POOL help libeth is a common library containing routines shared between several drivers, but not yet promoted to the generic kernel API. + +config LIBETH_XDP + tristate "Common XDP library (libeth_xdp)" if COMPILE_TEST + select LIBETH + help + XDP and XSk helpers based on libeth hotpath management. 
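A minimal standalone sketch (not part of the patch) of the preemptible-TC mapping introduced in the igc hunks above: igc_fpe_map_preempt_tc_to_queue() ORs each preemptible traffic class's contiguous [offset, offset + count) Tx-queue range into a queue bitmask via GENMASK(). The userspace GENMASK stand-in and the 4-tc layout below are illustrative assumptions, not driver code.

#include <stdio.h>

/* 32-bit userspace stand-in for the kernel's GENMASK() */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

struct tc_to_txq { unsigned int offset, count; };

/* Mirrors igc_fpe_map_preempt_tc_to_queue(): tc bitmap -> queue bitmap */
static unsigned int map_preempt_tc_to_queue(const struct tc_to_txq *map,
					    unsigned int num_tc,
					    unsigned long preemptible_tcs)
{
	unsigned int queue = 0;

	for (unsigned int i = 0; i < num_tc; i++) {
		if (!(preemptible_tcs & (1UL << i)))
			continue;
		queue |= GENMASK(map[i].offset + map[i].count - 1,
				 map[i].offset);
	}

	return queue;
}

int main(void)
{
	/* hypothetical layout: 4 tcs, one queue each, tc i -> queue i */
	const struct tc_to_txq map[4] = { {0, 1}, {1, 1}, {2, 1}, {3, 1} };

	/* tcs 2 and 3 preemptible -> queue mask 0xc (queues 2 and 3) */
	printf("0x%x\n", map_preempt_tc_to_queue(map, 4, 0xc));

	return 0;
}

With this sample layout, preemptible tcs 2-3 yield queue mask 0xc, which igc_fpe_save_preempt_queue() in the hunk above would translate into per-ring "preemptible" flags via BIT(i) tests.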
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile index 52492b081132..350bc0b38bad 100644 --- a/drivers/net/ethernet/intel/libeth/Makefile +++ b/drivers/net/ethernet/intel/libeth/Makefile @@ -1,6 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only -# Copyright (C) 2024 Intel Corporation +# Copyright (C) 2024-2025 Intel Corporation obj-$(CONFIG_LIBETH) += libeth.o libeth-y := rx.o +libeth-y += tx.o + +obj-$(CONFIG_LIBETH_XDP) += libeth_xdp.o + +libeth_xdp-y += xdp.o +libeth_xdp-y += xsk.o diff --git a/drivers/net/ethernet/intel/libeth/priv.h b/drivers/net/ethernet/intel/libeth/priv.h new file mode 100644 index 000000000000..9b811d31015c --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/priv.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2025 Intel Corporation */ + +#ifndef __LIBETH_PRIV_H +#define __LIBETH_PRIV_H + +#include <linux/types.h> + +/* XDP */ + +enum xdp_action; +struct libeth_xdp_buff; +struct libeth_xdp_tx_frame; +struct skb_shared_info; +struct xdp_frame_bulk; + +extern const struct xsk_tx_metadata_ops libeth_xsktmo_slow; + +void libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, + u32 count); +u32 libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, enum xdp_action act, + int ret); + +struct libeth_xdp_ops { + void (*bulk)(const struct skb_shared_info *sinfo, + struct xdp_frame_bulk *bq, bool frags); + void (*xsk)(struct libeth_xdp_buff *xdp); +}; + +void libeth_attach_xdp(const struct libeth_xdp_ops *ops); + +static inline void libeth_detach_xdp(void) +{ + libeth_attach_xdp(NULL); +} + +#endif /* __LIBETH_PRIV_H */ diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c index 66d1d23b8ad2..62521a1f4ec9 100644 --- a/drivers/net/ethernet/intel/libeth/rx.c +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2024 Intel Corporation */ +/* Copyright (C) 2024-2025 Intel Corporation */ + +#define DEFAULT_SYMBOL_NAMESPACE "LIBETH" + +#include <linux/export.h> #include <net/libeth/rx.h> @@ -68,7 +72,7 @@ static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp, static bool libeth_rx_page_pool_params(struct libeth_fq *fq, struct page_pool_params *pp) { - pp->offset = LIBETH_SKB_HEADROOM; + pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM; /* HW-writeable / syncable length per one page */ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset); @@ -155,11 +159,12 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) .dev = napi->dev->dev.parent, .netdev = napi->dev, .napi = napi, - .dma_dir = DMA_FROM_DEVICE, }; struct libeth_fqe *fqes; struct page_pool *pool; - bool ret; + int ret; + + pp.dma_dir = fq->xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; if (!fq->hsplit) ret = libeth_rx_page_pool_params(fq, &pp); @@ -173,20 +178,28 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) return PTR_ERR(pool); fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid); - if (!fqes) + if (!fqes) { + ret = -ENOMEM; goto err_buf; + } + + ret = xdp_reg_page_pool(pool); + if (ret) + goto err_mem; fq->fqes = fqes; fq->pp = pool; return 0; +err_mem: + kvfree(fqes); err_buf: page_pool_destroy(pool); - return -ENOMEM; + return ret; } -EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH"); +EXPORT_SYMBOL_GPL(libeth_rx_fq_create); /** * libeth_rx_fq_destroy - destroy a &page_pool created by libeth @@ -194,22 +207,23 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH"); */ void libeth_rx_fq_destroy(struct libeth_fq *fq) { + xdp_unreg_page_pool(fq->pp); kvfree(fq->fqes); page_pool_destroy(fq->pp); } -EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, "LIBETH"); +EXPORT_SYMBOL_GPL(libeth_rx_fq_destroy); /** - * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context - * @page: page to recycle + * libeth_rx_recycle_slow - recycle libeth netmem + * @netmem: network memory to recycle * * To be used on exceptions or rare cases not requiring fast inline recycling. */ -void libeth_rx_recycle_slow(struct page *page) +void __cold libeth_rx_recycle_slow(netmem_ref netmem) { - page_pool_recycle_direct(page->pp, page); + page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false); } -EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, "LIBETH"); +EXPORT_SYMBOL_GPL(libeth_rx_recycle_slow); /* Converting abstract packet type numbers into a software structure with * the packet parameters to do O(1) lookup on Rx. @@ -251,7 +265,7 @@ void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt) pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot]; pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer]; } -EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, "LIBETH"); +EXPORT_SYMBOL_GPL(libeth_rx_pt_gen_hash_type); /* Module */ diff --git a/drivers/net/ethernet/intel/libeth/tx.c b/drivers/net/ethernet/intel/libeth/tx.c new file mode 100644 index 000000000000..e0167f43d2a8 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/tx.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2025 Intel Corporation */ + +#define DEFAULT_SYMBOL_NAMESPACE "LIBETH" + +#include <net/libeth/xdp.h> + +#include "priv.h" + +/* Tx buffer completion */ + +DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk); +DEFINE_STATIC_CALL_NULL(xsk, libeth_xsk_buff_free_slow); + +/** + * libeth_tx_complete_any - perform Tx completion for one SQE of any type + * @sqe: Tx buffer to complete + * @cp: polling params + * + * Can be used to complete both regular and XDP SQEs, for example when + * destroying queues. + * When libeth_xdp is not loaded, XDPSQEs won't be handled. + */ +void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp) +{ + if (sqe->type >= __LIBETH_SQE_XDP_START) + __libeth_xdp_complete_tx(sqe, cp, static_call(bulk), + static_call(xsk)); + else + libeth_tx_complete(sqe, cp); +} +EXPORT_SYMBOL_GPL(libeth_tx_complete_any); + +/* Module */ + +void libeth_attach_xdp(const struct libeth_xdp_ops *ops) +{ + static_call_update(bulk, ops ? ops->bulk : NULL); + static_call_update(xsk, ops ? 
ops->xsk : NULL); +} +EXPORT_SYMBOL_GPL(libeth_attach_xdp); diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c new file mode 100644 index 000000000000..d4ac027d9584 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/xdp.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2025 Intel Corporation */ + +#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP" + +#include <linux/export.h> + +#include <net/libeth/xdp.h> + +#include "priv.h" + +/* XDPSQ sharing */ + +DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share); +EXPORT_SYMBOL_GPL(libeth_xdpsq_share); + +void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock, + const struct net_device *dev) +{ + bool warn; + + spin_lock_init(&lock->lock); + lock->share = true; + + warn = !static_key_enabled(&libeth_xdpsq_share); + static_branch_inc(&libeth_xdpsq_share); + + if (warn && net_ratelimit()) + netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n"); +} +EXPORT_SYMBOL_GPL(__libeth_xdpsq_get); + +void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock, + const struct net_device *dev) +{ + static_branch_dec(&libeth_xdpsq_share); + + if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit()) + netdev_notice(dev, "XDPSQ sharing disabled\n"); + + lock->share = false; +} +EXPORT_SYMBOL_GPL(__libeth_xdpsq_put); + +void __acquires(&lock->lock) +__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock) +{ + spin_lock(&lock->lock); +} +EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock); + +void __releases(&lock->lock) +__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock) +{ + spin_unlock(&lock->lock); +} +EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock); + +/* XDPSQ clean-up timers */ + +/** + * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer + * @timer: timer to initialize + * @xdpsq: queue this timer belongs to + * @lock: corresponding XDPSQ lock + * @poll: queue polling/completion function + * + * XDPSQ clean-up timers must be set up before using at the queue configuration + * time. Set the required pointers and the cleaning callback. + */ +void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq, + struct libeth_xdpsq_lock *lock, + void (*poll)(struct work_struct *work)) +{ + timer->xdpsq = xdpsq; + timer->lock = lock; + + INIT_DELAYED_WORK(&timer->dwork, poll); +} +EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer); + +/* ``XDP_TX`` bulking */ + +static void __cold +libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm) +{ + if (frm->len_fl & LIBETH_XDP_TX_MULTI) + libeth_xdp_return_frags(frm->data + frm->soff, true); + + libeth_xdp_return_va(frm->data, true); +} + +static void __cold +libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count) +{ + for (u32 i = 0; i < count; i++) { + const struct libeth_xdp_tx_frame *frm = &bq[i]; + + if (!(frm->len_fl & LIBETH_XDP_TX_FIRST)) + continue; + + libeth_xdp_tx_return_one(frm); + } +} + +static void __cold libeth_trace_xdp_exception(const struct net_device *dev, + const struct bpf_prog *prog, + u32 act) +{ + trace_xdp_exception(dev, prog, act); +} + +/** + * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames + * @bq: XDP Tx frame bulk + * @sent: number of frames sent successfully (from this bulk) + * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.) + * + * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly. + * Reports XDP Tx exceptions, frees the frames that won't be sent or adjust + * the Tx bulk to try again later. 
+ */ +void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent, + u32 flags) +{ + const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent]; + u32 left = bq->count - sent; + + if (!(flags & LIBETH_XDP_TX_NDO)) + libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX); + + if (!(flags & LIBETH_XDP_TX_DROP)) { + memmove(bq->bulk, pos, left * sizeof(*bq->bulk)); + bq->count = left; + + return; + } + + if (flags & LIBETH_XDP_TX_XSK) + libeth_xsk_tx_return_bulk(pos, left); + else if (!(flags & LIBETH_XDP_TX_NDO)) + libeth_xdp_tx_return_bulk(pos, left); + else + libeth_xdp_xmit_return_bulk(pos, left, bq->dev); + + bq->count = 0; +} +EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception); + +/* .ndo_xdp_xmit() implementation */ + +u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq, + u32 count, const struct net_device *dev) +{ + u32 n = 0; + + for (u32 i = 0; i < count; i++) { + const struct libeth_xdp_tx_frame *frm = &bq[i]; + dma_addr_t dma; + + if (frm->flags & LIBETH_XDP_TX_FIRST) + dma = *libeth_xdp_xmit_frame_dma(frm->xdpf); + else + dma = dma_unmap_addr(frm, dma); + + dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len), + DMA_TO_DEVICE); + + /* Actual xdp_frames are freed by the core */ + n += !!(frm->flags & LIBETH_XDP_TX_FIRST); + } + + return n; +} +EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk); + +/* Rx polling path */ + +/** + * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash + * @dst: target &libeth_xdp_buff to initialize + * @src: source stash + * + * External helper used by libeth_xdp_init_buff(), do not call directly. + * Recreate an onstack &libeth_xdp_buff using the stash saved earlier. + * The only field untouched (rxq) is initialized later in the + * abovementioned function. + */ +void libeth_xdp_load_stash(struct libeth_xdp_buff *dst, + const struct libeth_xdp_buff_stash *src) +{ + dst->data = src->data; + dst->base.data_end = src->data + src->len; + dst->base.data_meta = src->data; + dst->base.data_hard_start = src->data - src->headroom; + + dst->base.frame_sz = src->frame_sz; + dst->base.flags = src->flags; +} +EXPORT_SYMBOL_GPL(libeth_xdp_load_stash); + +/** + * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash + * @dst: target &libeth_xdp_buff_stash to initialize + * @src: source XDP buffer + * + * External helper used by libeth_xdp_save_buff(), do not call directly. + * Use the fields from the passed XDP buffer to initialize the stash on the + * queue, so that a partially received frame can be finished later during + * the next NAPI poll. + */ +void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst, + const struct libeth_xdp_buff *src) +{ + dst->data = src->data; + dst->headroom = src->data - src->base.data_hard_start; + dst->len = src->base.data_end - src->data; + + dst->frame_sz = src->base.frame_sz; + dst->flags = src->base.flags; + + WARN_ON_ONCE(dst->flags != src->base.flags); +} +EXPORT_SYMBOL_GPL(libeth_xdp_save_stash); + +void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash) +{ + LIBETH_XDP_ONSTACK_BUFF(xdp); + + libeth_xdp_load_stash(xdp, stash); + libeth_xdp_return_buff_slow(xdp); + + stash->data = NULL; +} +EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash); + +/** + * libeth_xdp_return_buff_slow - free &libeth_xdp_buff + * @xdp: buffer to free/return + * + * Slowpath version of libeth_xdp_return_buff() to be called on exceptions, + * queue clean-ups etc., without unwanted inlining. 
+ */ +void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp) +{ + __libeth_xdp_return_buff(xdp, false); +} +EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow); + +/** + * libeth_xdp_buff_add_frag - add frag to XDP buffer + * @xdp: head XDP buffer + * @fqe: Rx buffer containing the frag + * @len: frag length reported by HW + * + * External helper used by libeth_xdp_process_buff(), do not call directly. + * Frees both head and frag buffers on error. + * + * Return: true success, false on error (no space for a new frag). + */ +bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp, + const struct libeth_fqe *fqe, + u32 len) +{ + netmem_ref netmem = fqe->netmem; + + if (!xdp_buff_add_frag(&xdp->base, netmem, + fqe->offset + netmem_get_pp(netmem)->p.offset, + len, fqe->truesize)) + goto recycle; + + return true; + +recycle: + libeth_rx_recycle_slow(netmem); + libeth_xdp_return_buff_slow(xdp); + + return false; +} +EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag); + +/** + * libeth_xdp_prog_exception - handle XDP prog exceptions + * @bq: XDP Tx bulk + * @xdp: buffer to process + * @act: original XDP prog verdict + * @ret: error code if redirect failed + * + * External helper used by __libeth_xdp_run_prog() and + * __libeth_xsk_run_prog_slow(), do not call directly. + * Reports invalid @act, XDP exception trace event and frees the buffer. + * + * Return: libeth_xdp XDP prog verdict. + */ +u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq, + struct libeth_xdp_buff *xdp, + enum xdp_action act, int ret) +{ + if (act > XDP_REDIRECT) + bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act); + + libeth_trace_xdp_exception(bq->dev, bq->prog, act); + + if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) + return libeth_xsk_prog_exception(xdp, act, ret); + + libeth_xdp_return_buff_slow(xdp); + + return LIBETH_XDP_DROP; +} +EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception); + +/* Tx buffer completion */ + +static void libeth_xdp_put_netmem_bulk(netmem_ref netmem, + struct xdp_frame_bulk *bq) +{ + if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE)) + xdp_flush_frame_bulk(bq); + + bq->q[bq->count++] = netmem; +} + +/** + * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk + * @sinfo: shared info corresponding to the buffer + * @bq: XDP frame bulk to store the buffer + * @frags: whether the buffer has frags + * + * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx + * completion of ``XDP_TX`` buffers and allows to free them in same bulks + * with &xdp_frame buffers. + */ +void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo, + struct xdp_frame_bulk *bq, bool frags) +{ + if (!frags) + goto head; + + for (u32 i = 0; i < sinfo->nr_frags; i++) + libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]), + bq); + +head: + libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq); +} +EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk); + +/* Misc */ + +/** + * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold + * @count: number of descriptors in the queue + * + * The threshold is the limit at which RQs start to refill (when the number of + * empty buffers exceeds it) and SQs get cleaned up (when the number of free + * descriptors goes below it). To speed up hotpath processing, threshold is + * always pow-2, closest to 1/4 of the queue length. + * Don't call it on hotpath, calculate and cache the threshold during the + * queue initialization. + * + * Return: the calculated threshold. 
+ */ +u32 libeth_xdp_queue_threshold(u32 count) +{ + u32 quarter, low, high; + + if (likely(is_power_of_2(count))) + return count >> 2; + + quarter = DIV_ROUND_CLOSEST(count, 4); + low = rounddown_pow_of_two(quarter); + high = roundup_pow_of_two(quarter); + + return high - quarter <= quarter - low ? high : low; +} +EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold); + +/** + * __libeth_xdp_set_features - set XDP features for netdev + * @dev: &net_device to configure + * @xmo: XDP metadata ops (Rx hints) + * @zc_segs: maximum number of S/G frags the HW can transmit + * @tmo: XSk Tx metadata ops (Tx hints) + * + * Set all the features libeth_xdp supports. Only the first argument is + * necessary; without the third one (zero), XSk support won't be advertised. + * Use the non-underscored versions in drivers instead. + */ +void __libeth_xdp_set_features(struct net_device *dev, + const struct xdp_metadata_ops *xmo, + u32 zc_segs, + const struct xsk_tx_metadata_ops *tmo) +{ + xdp_set_features_flag(dev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT | + (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) | + NETDEV_XDP_ACT_RX_SG | + NETDEV_XDP_ACT_NDO_XMIT_SG); + dev->xdp_metadata_ops = xmo; + + tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo; + + dev->xdp_zc_max_segs = zc_segs ? : 1; + dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL; +} +EXPORT_SYMBOL_GPL(__libeth_xdp_set_features); + +/** + * libeth_xdp_set_redirect - toggle the XDP redirect feature + * @dev: &net_device to configure + * @enable: whether XDP is enabled + * + * Use this when XDPSQs are not always available to dynamically enable + * and disable redirect feature. + */ +void libeth_xdp_set_redirect(struct net_device *dev, bool enable) +{ + if (enable) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); +} +EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect); + +/* Module */ + +static const struct libeth_xdp_ops xdp_ops __initconst = { + .bulk = libeth_xdp_return_buff_bulk, + .xsk = libeth_xsk_buff_free_slow, +}; + +static int __init libeth_xdp_module_init(void) +{ + libeth_attach_xdp(&xdp_ops); + + return 0; +} +module_init(libeth_xdp_module_init); + +static void __exit libeth_xdp_module_exit(void) +{ + libeth_detach_xdp(); +} +module_exit(libeth_xdp_module_exit); + +MODULE_DESCRIPTION("Common Ethernet library - XDP infra"); +MODULE_IMPORT_NS("LIBETH"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c new file mode 100644 index 000000000000..846e902e31b6 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/xsk.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2025 Intel Corporation */ + +#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP" + +#include <linux/export.h> + +#include <net/libeth/xsk.h> + +#include "priv.h" + +/* ``XDP_TX`` bulking */ + +void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, + u32 count) +{ + for (u32 i = 0; i < count; i++) + libeth_xsk_buff_free_slow(bq[i].xsk); +} + +/* XSk TMO */ + +const struct xsk_tx_metadata_ops libeth_xsktmo_slow = { + .tmo_request_checksum = libeth_xsktmo_req_csum, +}; + +/* Rx polling path */ + +/** + * libeth_xsk_buff_free_slow - free an XSk Rx buffer + * @xdp: buffer to free + * + * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc. + * to avoid unwanted inlining. 
+ */ +void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp) +{ + xsk_buff_free(&xdp->base); +} +EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow); + +/** + * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer + * @head: head buffer + * @xdp: frag buffer + * + * External helper used by libeth_xsk_process_buff(), do not call directly. + * Frees both main and frag buffers on error. + * + * Return: main buffer with attached frag on success, %NULL on error (no space + * for a new frag). + */ +struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head, + struct libeth_xdp_buff *xdp) +{ + if (!xsk_buff_add_frag(&head->base, &xdp->base)) + goto free; + + return head; + +free: + libeth_xsk_buff_free_slow(xdp); + libeth_xsk_buff_free_slow(head); + + return NULL; +} +EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag); + +/** + * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info + * @rs: onstack stats to update + * @xdp: buffer to account + * + * External helper used by __libeth_xsk_run_pass(), do not call directly. + * Adds buffer's frags count and total len to the onstack stats. + */ +void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs, + const struct libeth_xdp_buff *xdp) +{ + libeth_xdp_buff_stats_frags(rs, xdp); +} +EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags); + +/** + * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts + * @xdp: buffer to process + * @bq: Tx bulk for queueing on ``XDP_TX`` + * @act: verdict to process + * @ret: error code if ``XDP_REDIRECT`` failed + * + * External helper used by __libeth_xsk_run_prog(), do not call directly. + * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus + * it is processed inline. The rest goes here for out-of-line processing, + * together with redirect errors. + * + * Return: libeth_xdp XDP prog verdict. + */ +u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp, + const struct libeth_xdp_tx_bulk *bq, + enum xdp_action act, int ret) +{ + switch (act) { + case XDP_DROP: + xsk_buff_free(&xdp->base); + + return LIBETH_XDP_DROP; + case XDP_TX: + return LIBETH_XDP_TX; + case XDP_PASS: + return LIBETH_XDP_PASS; + default: + break; + } + + return libeth_xdp_prog_exception(bq, xdp, act, ret); +} +EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow); + +/** + * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk + * @xdp: buffer to process + * @act: verdict returned by the prog + * @ret: error code if ``XDP_REDIRECT`` failed + * + * Internal. Frees the buffer and, if the queue uses XSk wakeups, stop the + * current NAPI poll when there are no free buffers left. + * + * Return: libeth_xdp's XDP prog verdict. + */ +u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, + enum xdp_action act, int ret) +{ + const struct xdp_buff_xsk *xsk; + u32 __ret = LIBETH_XDP_DROP; + + if (act != XDP_REDIRECT) + goto drop; + + xsk = container_of(&xdp->base, typeof(*xsk), xdp); + if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS) + __ret = LIBETH_XDP_ABORTED; + +drop: + libeth_xsk_buff_free_slow(xdp); + + return __ret; +} + +/* Refill */ + +/** + * libeth_xskfq_create - create an XSkFQ + * @fq: fill queue to initialize + * + * Allocates the FQEs and initializes the fields used by libeth_xdp: number + * of buffers to refill, refill threshold and buffer len. + * + * Return: %0 on success, -errno otherwise. 
+ */ +int libeth_xskfq_create(struct libeth_xskfq *fq) +{ + fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL, + fq->nid); + if (!fq->fqes) + return -ENOMEM; + + fq->pending = fq->count; + fq->thresh = libeth_xdp_queue_threshold(fq->count); + fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool); + + return 0; +} +EXPORT_SYMBOL_GPL(libeth_xskfq_create); + +/** + * libeth_xskfq_destroy - destroy an XSkFQ + * @fq: fill queue to destroy + * + * Zeroes the used fields and frees the FQEs array. + */ +void libeth_xskfq_destroy(struct libeth_xskfq *fq) +{ + fq->buf_len = 0; + fq->thresh = 0; + fq->pending = 0; + + kvfree(fq->fqes); +} +EXPORT_SYMBOL_GPL(libeth_xskfq_destroy); + +/* .ndo_xsk_wakeup */ + +static void libeth_xsk_napi_sched(void *info) +{ + __napi_schedule_irqoff(info); +} + +/** + * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure + * @csd: struct to initialize + * @napi: NAPI corresponding to this queue + * + * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order + * to do that, the corresponding CSDs must be initialized when creating the + * queues. + */ +void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi) +{ + INIT_CSD(csd, libeth_xsk_napi_sched, napi); +} +EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup); + +/** + * libeth_xsk_wakeup - perform an XSk wakeup + * @csd: CSD corresponding to the queue + * @qid: the stack queue index + * + * Try to mark the NAPI as missed first, so that it could be rescheduled. + * If it's not, schedule it on the corresponding CPU using IPIs (or directly + * if already running on it). + */ +void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid) +{ + struct napi_struct *napi = csd->info; + + if (napi_if_scheduled_mark_missed(napi) || + unlikely(!napi_schedule_prep(napi))) + return; + + if (unlikely(qid >= nr_cpu_ids)) + qid %= nr_cpu_ids; + + if (qid != raw_smp_processor_id() && cpu_online(qid)) + smp_call_function_single_async(qid, csd); + else + __napi_schedule(napi); +} +EXPORT_SYMBOL_GPL(libeth_xsk_wakeup); + +/* Pool setup */ + +#define LIBETH_XSK_DMA_ATTR \ + (DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC) + +/** + * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue + * @dev: target &net_device + * @qid: stack queue index to configure + * @enable: whether to enable or disable the pool + * + * Check that @qid is valid and then map or unmap the pool. + * + * Return: %0 on success, -errno otherwise. 
+ */ +int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable) +{ + struct xsk_buff_pool *pool; + + pool = xsk_get_pool_from_qid(dev, qid); + if (!pool) + return -EINVAL; + + if (enable) + return xsk_pool_dma_map(pool, dev->dev.parent, + LIBETH_XSK_DMA_ATTR); + else + xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR); + + return 0; +} +EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool); diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c index 66a9825fe11f..6fda656afa9c 100644 --- a/drivers/net/ethernet/intel/libie/rx.c +++ b/drivers/net/ethernet/intel/libie/rx.c @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2024 Intel Corporation */ +/* Copyright (C) 2024-2025 Intel Corporation */ +#define DEFAULT_SYMBOL_NAMESPACE "LIBIE" + +#include <linux/export.h> #include <linux/net/intel/libie/rx.h> /* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed @@ -116,7 +119,7 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = { LIBIE_RX_PT_IP(4), LIBIE_RX_PT_IP(6), }; -EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, "LIBIE"); +EXPORT_SYMBOL_GPL(libie_rx_pt_lut); MODULE_DESCRIPTION("Intel(R) Ethernet common library"); MODULE_IMPORT_NS("LIBETH"); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 147571fdada3..feab392ab2ee 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -5014,8 +5014,6 @@ static int mvneta_ethtool_get_rxnfc(struct net_device *dev, case ETHTOOL_GRXRINGS: info->data = rxq_number; return 0; - case ETHTOOL_GRXFH: - return -EOPNOTSUPP; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8ed83fb98862..44b201817d94 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx, return 0; } -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info) { u16 hash_opts = 0; u32 flow_type; @@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); } -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info) { unsigned long hash_opts; u32 flow_type; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 85c9c6e80678..caadf3aea95d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx, int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx, u32 *indir); -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info); void mvpp2_cls_init(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a7872d14a49d..8ebb985d2573 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5588,9 +5588,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_GRXFH: - ret = mvpp2_ethtool_rxfh_get(port, info); - break; case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; @@ -5628,9 +5625,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_SRXFH: - ret = mvpp2_ethtool_rxfh_set(port, info); - break; case ETHTOOL_SRXCLSRLINS: ret = mvpp2_ethtool_cls_rule_ins(port, info); break; @@ -5747,6 +5741,29 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); } +static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_get(port, info); +} + +static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_set(port, info); +} + static int mvpp2_ethtool_get_eee(struct net_device *dev, struct ethtool_keee *eee) { @@ -5813,6 +5830,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, + .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields, + .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields, .create_rxfh_context = mvpp2_create_rxfh_context, .modify_rxfh_context = mvpp2_modify_rxfh_context, .remove_rxfh_context = mvpp2_remove_rxfh_context, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index ccea37847df8..532813d8d028 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,4 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o + rvu_rep.o cn20k/mbox_init.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h new file mode 100644 index 000000000000..4285b5d6a6a2 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef CN20K_API_H +#define CN20K_API_H + +#include "../rvu.h" + +struct ng_rvu { + struct mbox_ops *rvu_mbox_ops; + struct qmem *pf_mbox_addr; + struct qmem *vf_mbox_addr; +}; + +/* Mbox related APIs */ +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num); +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap); +void cn20k_free_mbox_memory(struct rvu *rvu); +int cn20k_register_afpf_mbox_intr(struct rvu *rvu); +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start); +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu); +void cn20k_rvu_unregister_interrupts(struct rvu *rvu); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs); +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs); +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs); +#endif /* CN20K_API_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c new file mode 100644 index 000000000000..bd3aab7770dd --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include "rvu_trace.h" +#include "mbox.h" +#include "reg.h" +#include "api.h" + +static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = rvupf_read64(rvu, rvu_irq_data->intr_status); + rvupf_write64(rvu, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); + + rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start) +{ + struct rvu_irq_data *irq_data; + int intr_vec, offset, vec = 0; + int err; + + /* irq data for 4 VFPF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; + intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + } + irq_data[vec].afvf_queue_work_hdlr = + rvu_queue_work; + offset = pf_vec_start + intr_vec; + irq_data[vec].vec_num = offset; + irq_data[vec].rvu = rvu; + + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d", + vec / 2, vec % 2); + err = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + 
&irq_data[vec]); + if (err) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for AFVF mbox irq\n"); + return err; + } + rvu->irq_allocated[offset] = true; + } + + return 0; +} + +/* CN20K mbox PFx => AF irq handler */ +static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Clear interrupts */ + intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status); + rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); + + /* Sync with mbox memory region */ + rmb(); + + rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info, + rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64)); + + /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); +} + +void cn20k_rvu_unregister_interrupts(struct rvu *rvu) +{ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); +} + +int cn20k_register_afpf_mbox_intr(struct rvu *rvu) +{ + struct rvu_irq_data *irq_data; + int intr_vec, ret, vec = 0; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <= + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++, + vec++) { + switch (intr_vec) { + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].rvu = rvu; + + /* Register mailbox interrupt handler */ + 
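/* Vector-to-bank mapping (descriptive note, not part of the patch):
 * vec 0/1 poll RVU_MBOX_AF_PFAF_INT(0)/(1) and vec 2/3 poll
 * RVU_MBOX_AF_PFAF1_INT(0)/(1); in each pair, index 0 serves mdevs
 * 0-63 and index 1 those from 64 up, matching the irq names built
 * below ("RVUAF PFAF%d Mbox%d").
 */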
sprintf(&rvu->irq_name[intr_vec * NAME_SIZE], + "RVUAF PFAF%d Mbox%d", + vec / 2, vec % 2); + ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[intr_vec * NAME_SIZE], + &irq_data[vec]); + if (ret) + return ret; + + rvu->irq_allocated[intr_vec] = true; + } + + return 0; +} + +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap) +{ + int region; + u64 bar; + + if (type == TYPE_AFVF) { + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base); + bar += region * MBOX_SIZE; + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; + } + + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base); + bar += region * MBOX_SIZE; + + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; +} + +static int rvu_alloc_mbox_memory(struct rvu *rvu, int type, + int ndevs, int mbox_size) +{ + struct qmem *mbox_addr; + dma_addr_t iova; + int pf, err; + + /* Allocate contiguous memory for mailbox communication. + * eg: AF <=> PFx mbox memory + * This allocated memory is split into chunks of MBOX_SIZE + * and setup into each of the RVU PFs. In HW this memory will + * get aliased to an offset within BAR2 of those PFs. + * + * AF will access mbox memory using direct physical addresses + * and PFs will access the same shared memory from BAR2. + * + * PF <=> VF mbox memory also works in the same fashion. + * AFPF, PFVF requires IOVA to be used to maintain the mailbox msgs + */ + + err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size); + if (err) + return -ENOMEM; + + switch (type) { + case TYPE_AFPF: + rvu->ng_rvu->pf_mbox_addr = mbox_addr; + iova = (u64)mbox_addr->iova; + for (pf = 0; pf < ndevs; pf++) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf), + (u64)iova); + iova += mbox_size; + } + break; + case TYPE_AFVF: + rvu->ng_rvu->vf_mbox_addr = mbox_addr; + rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + break; + default: + return 0; + } + + return 0; +} + +static struct mbox_ops cn20k_mbox_ops = { + .pf_intr_handler = cn20k_mbox_pf_common_intr_handler, + .afvf_intr_handler = cn20k_afvf_mbox_intr_handler, +}; + +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs) +{ + int dev; + + if (!is_cn20k(rvu->pdev)) + return 0; + + rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops; + + if (type == TYPE_AFVF) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE)); + } else { + for (dev = 0; dev < ndevs; dev++) + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE)); + } + + return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE); +} + +void cn20k_free_mbox_memory(struct rvu *rvu) +{ + if (!is_cn20k(rvu->pdev)) + return; + + qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr); + qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr); +} + +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs) +{ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), 
INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); +} + +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs) +{ + /* Clear any pending interrupts and enable AF VF interrupts for + * the first 64 VFs. + */ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* FLR */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Same for remaining VFs, if any. */ + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); +} + +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf) +{ + int qints, hwctx_size, err; + u64 cfg, ctx_cfg; + + if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev)) + return 0; + + ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); + /* Alloc memory for CQINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 24) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), + (u64)pfvf->cq_ints_ctx->iova); + + /* Alloc memory for QINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 12) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), + (u64)pfvf->nix_qints_ctx->iova); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h new file mode 100644 index 000000000000..affb39803120 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef RVU_MBOX_REG_H +#define RVU_MBOX_REG_H +#include "../rvu.h" +#include "../rvu_reg.h" + +/* RVUM block registers */ +#define RVU_PF_DISC (0x0) +#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16) +#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12) + +/* Mbox Registers */ +/* RVU AF BAR0 Mbox registers for AF => PFx */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4) +#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3) +#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6) + +/* RVU PF => AF mbox registers */ +#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3) +#define RVU_MBOX_PF_INT (0xC20) +#define RVU_MBOX_PF_INT_W1S (0xC28) +#define RVU_MBOX_PF_INT_ENA_W1S (0xC30) +#define RVU_MBOX_PF_INT_ENA_W1C (0xC38) + +#define RVU_AF_BAR2_SEL (0x9000000) +#define RVU_AF_BAR2_PFID (0x16400) +#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12) +#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12) + +#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT_ENA_W1S(a) (0x3030 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT_ENA_W1C(a) (0x3038 | (a) << 6) + +#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3) +#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4) +#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4) + +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) + +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + +#define RVU_MBOX_PF_VF_ADDR (0xC40) +#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VF_CFG (0xC60) + +#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3) +#define RVU_MBOX_VF_INT (0x20) +#define RVU_MBOX_VF_INT_W1S (0x28) +#define RVU_MBOX_VF_INT_ENA_W1S (0x30) +#define RVU_MBOX_VF_INT_ENA_W1C (0x38) + +#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3) +#endif /* RVU_MBOX_REG_H */
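These mailbox interrupt CSRs are banked: the (a) << 6 term is a 0x40 stride, with index 0 covering devices 0-63 and index 1 devices 64 and up, which is why the enable paths in mbox_init.c write INTR_MASK(total_pfs) to index 0 and INTR_MASK(total_pfs - 64) to index 1. A stand-alone sketch of that layout (illustrative only; INTR_MASK() is assumed to build an n-bit mask that saturates at 64 bits, per the existing AF driver convention):

#include <stdint.h>

#define RVU_MBOX_AF_PFAF_INT(a)	(0x2980 | (a) << 6)
#define INTR_MASK(n)		(((n) < 64) ? ((1ULL << (n)) - 1) : ~0ULL)

/* Locate the status CSR and bit for a PF, and build the two per-bank
 * enable masks for 'total' devices; e.g. total = 96 yields
 * bank0 = ~0ULL and bank1 = 0xffffffff.
 */
static void pfaf_int_layout(int pf, int total, uint64_t *csr, int *bit,
			    uint64_t *bank0, uint64_t *bank1)
{
	*csr = RVU_MBOX_AF_PFAF_INT(pf >> 6);	/* 0x2980 or 0x29c0 */
	*bit = pf & 63;
	*bank0 = INTR_MASK(total);
	*bank1 = total > 64 ? INTR_MASK(total - 64) : 0;
}

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h new file mode 100644 index 000000000000..76ce3ec6da9c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell.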
+ * + */ + +#ifndef STRUCT_H +#define STRUCT_H + +/* + * CN20k RVU PF MBOX Interrupt Vector Enumeration + * + * Vectors 0 - 3 are compatible with pre cn20k and hence + * existing macros are being reused. + */ +enum rvu_mbox_pf_int_vec_e { + RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8, + RVU_MBOX_PF_INT_VEC_CNT = 0x9, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_cn20k_int_vec_e { + RVU_AF_CN20K_INT_VEC_POISON = 0x0, + RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1, + RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2, + RVU_AF_CN20K_INT_VEC_PFME0 = 0x3, + RVU_AF_CN20K_INT_VEC_PFME1 = 0x4, + RVU_AF_CN20K_INT_VEC_GEN = 0x5, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, + RVU_AF_CN20K_INT_VEC_CNT = 0xa, +}; +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 406c59100a35..8a08bebf08c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -39,7 +39,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u16 entry_sz; + u32 entry_sz; u8 align; u32 qsize; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 7d21905deed8..75872d257eca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -10,8 +10,11 @@ #include <linux/pci.h> #include "rvu_reg.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #include "mbox.h" #include "rvu_trace.h" +#include "rvu.h" static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); @@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) mdev->rsp_size = 0; tx_hdr->num_msgs = 0; tx_hdr->msg_size = 0; + tx_hdr->sig = 0; rx_hdr->num_msgs = 0; rx_hdr->msg_size = 0; + rx_hdr->sig = 0; } EXPORT_SYMBOL(__otx2_mbox_reset); @@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox) } EXPORT_SYMBOL(otx2_mbox_destroy); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0); + 
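/* Descriptive note, not part of the patch: the two switches in
 * cn20k_mbox_setup() encode a mirror image. One side's TX window is
 * the peer's RX window, e.g. MBOX_DIR_AFPF transmits from
 * MBOX_DOWN_TX_START, which MBOX_DIR_PFAF maps as its rx_start;
 * likewise the trigger register index is flipped between the down
 * and up directions of each pair.
 */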
mbox->tr_shift = 0; + break; + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1); + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_VFPF: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0); + mbox->tr_shift = 0; + break; + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1); + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + mbox->reg_base = reg_base; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + mbox->ndevs = ndevs; + + return 0; +} + static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, void *reg_base, int direction, int ndevs) { + if (is_cn20k(pdev)) + return cn20k_mbox_setup(mbox, pdev, reg_base, + direction, ndevs); + switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_PFVF: @@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) spin_lock(&mdev->mbox_lock); - tx_hdr->msg_size = mdev->msg_size; + if (!tx_hdr->sig) { + tx_hdr->msg_size = mdev->msg_size; + tx_hdr->num_msgs = mdev->num_msgs; + } /* Reset header for next messages */ mdev->msg_size = 0; @@ -248,7 +345,6 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) * messages. So this should be written after writing all the messages * to the shared memory. */ - tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset); @@ -309,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_msghdr *msghdr = NULL; + struct mbox_hdr *mboxhdr = NULL; spin_lock(&mdev->mbox_lock); size = ALIGN(size, MBOX_MSG_ALIGN); @@ -332,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, mdev->msg_size += size; mdev->rsp_size += size_rsp; msghdr->next_msgoff = mdev->msg_size + msgs_offset; + + mboxhdr = mdev->mbase + mbox->tx_start; + /* Clear the msg header region */ + memset(mboxhdr, 0, msgs_offset); + exit: spin_unlock(&mdev->mbox_lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index a213b2663583..b3562d658d45 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -13,6 +13,7 @@ #include "rvu_struct.h" #include "common.h" +#include "cn20k/struct.h" #define MBOX_SIZE SZ_64K @@ -50,6 +51,11 @@ #define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ #define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ +enum { + TYPE_AFVF, + TYPE_AFPF, +}; + struct otx2_mbox_dev { void *mbase; /* This dev's mbox region */ void *hwbase; @@ -78,6 +84,8 @@ struct otx2_mbox { struct mbox_hdr { u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ + u16 opt_msg; + u8 sig; }; /* Header which precedes every msg and is also part of it */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index 0277d226293e..d7030dfa5dad 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -97,7 +97,7 @@ int 
mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; event->intr_mask &= pfvf->intr_mask; @@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) struct mcs_intr_info *req; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); mutex_lock(&rvu->mbox_lock); @@ -193,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu, if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; mcs->pf_map[0] = pcifunc; pfvf->intr_mask = req->intr_mask; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index a8025f0486c9..bfee71f4cddc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -20,6 +20,8 @@ #include "rvu_trace.h" #include "rvu_npc_hash.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" @@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)); -enum { - TYPE_AFVF, - TYPE_AFPF, -}; +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq); +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq); /* Supported devices */ static const struct pci_device_id rvu_id_table[] = { @@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or @@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } block->fn_map[lf] = attach ? 
pcifunc : 0; @@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); } -inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) { u64 cfg; @@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc) int pf, func; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); func = pcifunc & RVU_PFVF_FUNC_MASK; /* Get first HWVF attached to this PF */ @@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) if (pcifunc & RVU_PFVF_FUNC_MASK) return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; else - return &rvu->pf[rvu_get_pf(pcifunc)]; + return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; } static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) @@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) int pf, vf, nvfs; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (pf >= rvu->hw->total_pfs) return false; @@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu) rvu_reset_msix(rvu); mutex_destroy(&rvu->rsrc_lock); + + /* Free the QINT/CINT memory */ + pfvf = &rvu->pf[RVU_AFPF]; + qmem_free(rvu->dev, pfvf->nix_qints_ctx); + qmem_free(rvu->dev, pfvf->cq_ints_ctx); } static void rvu_setup_pfvf_macaddress(struct rvu *rvu) @@ -1487,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); /* All CGX mapped PFs are set with assigned NIX block during init */ - if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { blkaddr = pf->nix_blkaddr; } else if (is_lbk_vf(rvu, pcifunc)) { vf = pcifunc - 1; @@ -1501,7 +1501,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) } /* if SDP1 then the blkaddr is NIX1 */ - if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1) blkaddr = BLKADDR_NIX1; switch (blkaddr) { @@ -2006,7 +2006,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, vf = pcifunc & RVU_PFVF_FUNC_MASK; cfg = rvu_read64(rvu, BLKADDR_RVUM, - RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); + RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc))); numvfs = (cfg >> 12) & 0xFF; if (vf && vf <= numvfs) @@ -2223,15 +2223,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) { + req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs; + rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL, + RVU_AF_BAR2_PFID); + if (type == TYPE_AFPF) + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)), + 0x1); + else + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)), + 0x1); + usleep_range(5000, 6000); + goto done; + } + for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { msg = mdev->mbase + offset; /* Set which PF/VF sent this message based on mbox IRQ */ switch (type) { case TYPE_AFPF: - msg->pcifunc &= - ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); - msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); + msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev); + msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0); break; case TYPE_AFVF: msg->pcifunc &= @@ -2249,16 +2264,17 @@ static void __rvu_mbox_handler(struct rvu_work 
*mwork, int type, bool poll) if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", err, otx2_mbox_id2name(msg->id), - msg->id, rvu_get_pf(msg->pcifunc), + msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc), (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, devid); } +done: mw->mbox_wrk[devid].num_msgs = 0; - if (poll) + if (!is_cn20k(mbox->pdev) && poll) otx2_mbox_wait_for_zero(mbox, devid); /* Send mbox responses to VF/PF */ @@ -2364,13 +2380,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) __rvu_mbox_up_handler(mwork, TYPE_AFVF); } -static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, +static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr, int num, int type, unsigned long *pf_bmap) { struct rvu_hwinfo *hw = rvu->hw; int region; u64 bar4; + /* For cn20k platform AF mailbox region is allocated by software + * and the corresponding IOVA is programmed in hardware unlike earlier + * silicons where software uses the hardware region after ioremap. + */ + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr, + num, type, pf_bmap); + /* For cn10k platform VF mailbox regions of a PF follows after the * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from * RVU_PF_VF_BAR4_ADDR register. @@ -2389,7 +2413,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2412,7 +2436,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, RVU_AF_PF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2420,20 +2444,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, error: while (region--) - iounmap((void __iomem *)mbox_addr[region]); + iounmap(mbox_addr[region]); return -ENOMEM; } +static struct mbox_ops rvu_mbox_ops = { + .pf_intr_handler = rvu_mbox_pf_intr_handler, + .afvf_intr_handler = rvu_mbox_intr_handler, +}; + static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) { int err = -EINVAL, i, dir, dir_up; + void __iomem **mbox_regions; + struct ng_rvu *ng_rvu_mbox; void __iomem *reg_base; struct rvu_work *mwork; unsigned long *pf_bmap; - void **mbox_regions; const char *name; u64 cfg; @@ -2441,6 +2471,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, if (!pf_bmap) return -ENOMEM; + ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL); + if (!ng_rvu_mbox) { + err = -ENOMEM; + goto free_bitmap; + } + /* RVU VFs */ if (type == TYPE_AFVF) bitmap_set(pf_bmap, 0, num); @@ -2454,12 +2490,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } } + rvu->ng_rvu = ng_rvu_mbox; + + rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops; + + err = cn20k_rvu_mbox_init(rvu, type, num); + if (err) + goto free_mem; + mutex_init(&rvu->mbox_lock); - mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); + mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL); if 
(!mbox_regions) { err = -ENOMEM; - goto free_bitmap; + goto free_qmem; } switch (type) { @@ -2486,7 +2530,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } mw->mbox_wq = alloc_workqueue("%s", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + WQ_HIGHPRI | WQ_MEM_RECLAIM, num, name); if (!mw->mbox_wq) { err = -ENOMEM; @@ -2538,6 +2582,10 @@ unmap_regions: iounmap((void __iomem *)mbox_regions[num]); free_regions: kfree(mbox_regions); +free_qmem: + cn20k_free_mbox_memory(rvu); +free_mem: + kfree(rvu->ng_rvu); free_bitmap: bitmap_free(pf_bmap); return err; @@ -2564,8 +2612,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw) otx2_mbox_destroy(&mw->mbox_up); } -static void rvu_queue_work(struct mbox_wq_info *mw, int first, - int mdevs, u64 intr) +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -2656,6 +2704,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; + if (is_cn20k(rvu->pdev)) { + cn20k_rvu_enable_mbox_intr(rvu); + return; + } + /* Clear spurious irqs, if any */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); @@ -2773,7 +2826,7 @@ static void rvu_flr_handler(struct work_struct *work) cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); for (vf = 0; vf < numvfs; vf++) __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); @@ -2909,9 +2962,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu) rvu_cpt_unregister_interrupts(rvu); - /* Disable the Mbox interrupt */ - rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, - INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + if (!is_cn20k(rvu->pdev)) + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + else + cn20k_rvu_unregister_interrupts(rvu); /* Disable the PF FLR interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, @@ -2944,6 +3000,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) * VF interrupts can be handled. Offset equal to zero means * that PF vectors are not configured and overlapping AF vectors. 
*/ + if (is_cn20k(rvu->pdev)) + return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT + + RVU_MBOX_PF_INT_VEC_CNT) && offset; + return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && offset; } @@ -2974,18 +3034,30 @@ static int rvu_register_interrupts(struct rvu *rvu) return ret; } - /* Register mailbox interrupt handler */ - sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); - ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), - rvu_mbox_pf_intr_handler, 0, - &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); - if (ret) { - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for mbox irq\n"); - goto fail; - } + if (!is_cn20k(rvu->pdev)) { + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], + "RVUAF Mbox"); + ret = request_irq(pci_irq_vector + (rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * + NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } - rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + } else { + ret = cn20k_register_afpf_mbox_intr(rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } + } /* Enable mailbox interrupts from all PFs */ rvu_enable_mbox_intr(rvu); @@ -3040,34 +3112,40 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Get PF MSIX vectors offset. */ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + if (!is_cn20k(rvu->pdev)) { + /* Register MBOX0 interrupt. */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox0\n"); - /* Register MBOX0 interrupt. */ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox0\n"); - - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; - /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so - * simply increment current offset by 1. - */ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox1\n"); + /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so + * simply increment current offset by 1. 
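For cn20k the budget in rvu_afvf_msix_vectors_num_ok() works out, from the enums in cn20k/struct.h above, to RVU_AF_CN20K_INT_VEC_CNT + RVU_MBOX_PF_INT_VEC_CNT = 0xa + 0x9 = 19 MSI-X vectors, plus a non-zero PF vector offset. A one-line restatement (hypothetical helper, not in the patch):

#include <stdbool.h>

static bool cn20k_afvf_msix_ok(int msix_max, int offset)
{
	return msix_max >= 0xa + 0x9 && offset != 0;	/* 19 vectors min */
}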
+ */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox1\n"); - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; + } else { + ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox\n"); + } /* Register FLR interrupt handler for AF's VFs */ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; @@ -3178,6 +3256,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_disable_afvf_intr(rvu, vfs); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); @@ -3194,6 +3275,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_enable_afvf_intr(rvu, vfs); + /* Clear any pending interrupts and enable AF VF interrupts for * the first 64 VFs. */ @@ -3438,6 +3522,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); + /* Alloc CINT and QINT memory */ + rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0, + (rvu->hw->block[BLKADDR_NIX0].lf.max)); return 0; err_dl: rvu_unregister_dl(rvu); @@ -3489,6 +3576,9 @@ static void rvu_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, rvu->hw); + if (is_cn20k(rvu->pdev)) + cn20k_free_mbox_memory(rvu); + kfree(rvu->ng_rvu); devm_kfree(&pdev->dev, rvu); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 48f66292ad5c..7ee1fdeb5295 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -10,6 +10,7 @@ #include <linux/pci.h> #include <net/devlink.h> +#include <linux/soc/marvell/silicons.h> #include "rvu_struct.h" #include "rvu_devlink.h" @@ -43,12 +44,39 @@ #define MAX_CPT_BLKS 2 /* PF_FUNC */ -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_OTX2_PFVF_PF_SHIFT 10 +#define RVU_OTX2_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_CN20K_PFVF_PF_SHIFT 9 +#define RVU_CN20K_PFVF_PF_MASK 0x7F + +static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func) +{ + if (is_cn20k(pdev)) + return ((pf & RVU_CN20K_PFVF_PF_MASK) << + RVU_CN20K_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); + else + return ((pf & RVU_OTX2_PFVF_PF_MASK) << + RVU_OTX2_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); +} + +static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev) +{ + if (is_cn20k(pdev)) + return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT); + else + return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT); +} + +#define RVU_AFPF 25 #ifdef CONFIG_DEBUG_FS + struct dump_ctx { int lf; int id; @@ -446,6 +474,23 @@ struct mbox_wq_info { struct workqueue_struct *mbox_wq; }; +struct rvu_irq_data { + u64 intr_status; + void 
(*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + struct rvu *rvu; + int vec_num; + int start; + int mdevs; +}; + +struct mbox_ops { + irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq); + irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq); +}; + struct channel_fwdata { struct sdp_node_info info; u8 valid; @@ -611,6 +656,8 @@ struct rvu { struct list_head rep_evtq_head; /* Representor event lock */ spinlock_t rep_evtq_lock; + + struct ng_rvu *ng_rvu; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -836,7 +883,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); -int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); @@ -865,8 +911,8 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); /* SDP APIs */ int rvu_sdp_init(struct rvu *rvu); -bool is_sdp_pfvf(u16 pcifunc); -bool is_sdp_pf(u16 pcifunc); +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc); +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc); bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) @@ -877,11 +923,21 @@ static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) return false; } +static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc) +{ + if (is_cn20k(pdev)) + return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) & + RVU_CN20K_PFVF_PF_MASK; + else + return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) & + RVU_OTX2_PFVF_PF_MASK; +} + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && - !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); + !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0)); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -893,7 +949,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) { return ((pcifunc & RVU_PFVF_FUNC_MASK) && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))); + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))); } #define M(_name, _id, fn_name, req, rsp) \ @@ -901,6 +957,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); MBOX_MESSAGES #undef M +/* Mbox APIs */ +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + int rvu_cgx_init(struct rvu *rvu); int rvu_cgx_exit(struct rvu *rvu); void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); @@ -955,7 +1015,8 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx, u16 mcam_index); void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); - +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index d0331b0e0bfd..b79db887ab9b 100644 --- 
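The bulk of the remaining hunks are the mechanical fallout of this layout change: every rvu_get_pf() call site now passes rvu->pdev so the right shift/mask pair is selected at runtime. A quick stand-alone check of the two encodings defined in rvu.h above (a sketch; make_pcifunc() is a local model, not the driver function):

#include <assert.h>
#include <stdint.h>

static uint16_t make_pcifunc(int pf, int func, int cn20k)
{
	return cn20k ? ((pf & 0x7f) << 9) | (func & 0x3ff)	/* CN20K */
		     : ((pf & 0x3f) << 10) | (func & 0x3ff);	/* OcteonTX2 */
}

int main(void)
{
	assert(make_pcifunc(5, 2, 0) == 0x1402);	/* pf 5, func 2, pre-cn20k */
	assert(make_pcifunc(5, 2, 1) == 0x0a02);	/* pf 5, func 2, cn20k */
	return 0;
}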
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -457,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu) inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return false; return true; } @@ -484,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -501,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -526,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int i = 0, lmac_count = 0; struct mac_ops *mac_ops; u8 max_dmac_filters; @@ -577,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; int stat = 0, err = 0; u64 tx_stat, rx_stat; @@ -633,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *parent_pf; struct mac_ops *mac_ops; u8 cgx_idx, lmac; @@ -663,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_idx, lmac; void *cgxd; @@ -681,7 +681,7 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -701,7 +701,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; @@ -725,7 +725,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -743,7 +743,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, struct cgx_max_dmac_entries_get_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; /* If msg is received from PFs(which are not mapped to CGX LMACs) @@ -769,7 +769,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct 
cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; u64 cfg; @@ -790,7 +790,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -809,7 +809,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -828,7 +828,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -864,7 +864,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) return -EPERM; return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); @@ -878,7 +878,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) @@ -917,7 +917,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; int pf, err; - pf = rvu_get_pf(req->hdr.pcifunc); + pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; @@ -933,7 +933,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, struct msg_req *req, struct cgx_features_info_msg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; @@ -975,7 +975,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1005,7 +1005,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_pfc = 0, tx_pfc = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1046,7 +1046,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *req, struct cgx_pause_frm_cfg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; int err = 0; @@ -1073,7 +1073,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = 
rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1106,7 +1106,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, /* Assumes LF of a PF and all of its VF belongs to the same * NIX block */ - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; @@ -1133,10 +1133,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) struct rvu_pfvf *parent_pf, *pfvf; int cgx_users, err = 0; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; pfvf = rvu_get_pfvf(rvu, pcifunc); mutex_lock(&rvu->cgx_cfg_lock); @@ -1179,7 +1179,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, struct fec_mode *req, struct fec_mode *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1195,7 +1195,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, struct cgx_fw_data *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!rvu->fwdata) @@ -1222,7 +1222,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, struct cgx_set_link_mode_req *req, struct cgx_set_link_mode_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; @@ -1238,7 +1238,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1256,7 +1256,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1272,7 +1272,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_8023 = 0, tx_8023 = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1310,7 +1310,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, struct cgx_pfc_cfg *req, struct cgx_pfc_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -1335,7 +1335,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, void rvu_mac_reset(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; struct cgx *cgxd; u8 cgx, lmac; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 4a3370a40dd8..05adc54535eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -66,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + + return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -83,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 3c5bbaf12e59..f404117bf6c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (pcifunc & RVU_PFVF_FUNC_MASK) return false; @@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return false; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index c827da626471..0c20642f81b9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -688,7 +688,7 @@ static int get_max_column_width(struct rvu *rvu) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -759,7 +759,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { off = 0; flag = 0; - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -842,7 +842,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) cgx[0] = 0; lmac[0] = 0; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->nix_blkaddr == BLKADDR_NIX0) @@ -2623,10 +2623,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) pcifunc = ipolicer->pfvf_map[idx]; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(m, "Allocated to :: PF %d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(m, "Allocated to :: PF %d VF %d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); print_band_prof_ctx(m, &aq_rsp.prof); } @@ -2983,10 +2983,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(s, "\n\t\t Device \t\t: PF%d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); if (entry_acnt) { 
@@ -3049,13 +3049,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) seq_puts(filp, "\n\t\t Current allocation\n"); seq_puts(filp, "\t\t====================\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; for (vf = 0; vf < numvfs; vf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); } } @@ -3326,7 +3326,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { - pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, iter->owner); seq_printf(s, "\n\tInstalled by: PF%d ", pf); if (iter->owner & RVU_PFVF_FUNC_MASK) { @@ -3344,7 +3344,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) rvu_dbg_npc_mcam_show_flows(s, iter); if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; - pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, target); seq_printf(s, "\tForward to: PF%d ", pf); if (target & RVU_PFVF_FUNC_MASK) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 613655fcd34f..bdf4d852c15d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -315,7 +315,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, if (lvl >= hw->cap.nix_tx_aggr_lvl) { if ((nix_get_tx_link(rvu, map_func) != nix_get_tx_link(rvu, pcifunc)) && - (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) + (rvu_get_pf(rvu->pdev, map_func) != + rvu_get_pf(rvu->pdev, pcifunc))) return false; else return true; @@ -339,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, bool from_vf; int err; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && type != NIX_INTF_TYPE_SDP) return 0; @@ -416,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, break; case NIX_INTF_TYPE_SDP: from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; sdp_info = parent_pf->sdp_info; if (!sdp_info) { dev_err(rvu->dev, "Invalid sdp_info pointer\n"); @@ -590,12 +591,12 @@ static int nix_bp_disable(struct rvu *rvu, u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; if (cpt_link && !rvu->hw->cpt_links) @@ -736,9 +737,9 @@ static int nix_bp_enable(struct rvu *rvu, u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ @@ -1674,7 +1675,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, } intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) intf = NIX_INTF_TYPE_SDP; err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, @@ -1798,7 +1799,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); if (rc < 0) { dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return NIX_AF_ERR_MARK_CFG_FAIL; } @@ -2050,7 +2052,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ @@ -2068,7 +2070,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, int link, int *start, int *end) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); /* LBK links */ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { @@ -2426,7 +2428,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, { struct nix_smq_flush_ctx *smq_flush_ctx; int err, restore_tx_en = 0, i; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; u16 tl2_tl3_link_schq; u8 link, link_level; @@ -2820,7 +2822,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, { struct rvu_hwinfo *hw = rvu->hw; int lbk_link_start, lbk_links; - u8 pf = rvu_get_pf(pcifunc); + u8 pf = rvu_get_pf(rvu->pdev, pcifunc); int schq; u64 cfg; @@ -3190,7 +3192,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return err; } return 0; @@ -3458,7 +3461,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, - pcifunc >> RVU_PFVF_PF_SHIFT); + rvu_get_pf(rvu->pdev, pcifunc)); return -EINVAL; } @@ -3510,7 +3513,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, struct rvu_pfvf *pfvf; if (!hw->cap.nix_rx_multicast || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, + pcifunc & ~RVU_PFVF_FUNC_MASK))) { *mce_list = NULL; *mce_idx = 0; return; @@ -3544,13 +3548,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ - if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return 0; @@ -3619,7 +3623,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) for (idx = 0; idx < (numvfs + 1); 
idx++) { /* idx-0 is for PF, followed by VFs */ - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pcifunc |= idx; /* Add dummy entries now, so that we don't have to check * for whether AQ_OP should be INIT/WRITE later on. @@ -4554,7 +4558,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, static void nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct rvu_pfvf *pfvf; int maxlen, minlen; int numvfs, hwvf; @@ -4601,7 +4605,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; @@ -5251,7 +5255,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, true); @@ -5284,7 +5288,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; @@ -5296,7 +5300,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; u64 sa_base; @@ -5385,7 +5389,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int nixlf; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index da15bb451178..c7c70429eb6c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type) { - int pf = rvu_get_pf(pcifunc); + struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); + struct rvu *rvu = hw->rvu; + int pf = rvu_get_pf(rvu->pdev, pcifunc); int index; /* Check if this is for a PF */ @@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); @@ -3434,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr, nixlf, rc, intf_mode; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u64 rxpkind, txpkind; u8 cgx_id, lmac_id; diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index d2661e7fabdb..999f6d93c7fe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct npc_exact_table_entry *entry; struct npc_exact_table *table; struct rvu_pfvf *pfvf; @@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; int rc = 0; @@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); int rc; rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); @@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu) } /* Filter rules are only for PF */ - pcifunc = RVU_PFFUNC(i, 0); + pcifunc = RVU_PFFUNC(rvu->pdev, i, 0); dev_dbg(rvu->dev, "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h index 57a09328d46b..cb25cf478f1f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h @@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = { #define NPC_MCAM_DROP_RULE_MAX 30 #define NPC_MCAM_SDP_DROP_RULE_IDX 0 -#define RVU_PFFUNC(pf, func) \ - ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ - (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) +#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func) enum npc_exact_opc_type { NPC_EXACT_OPC_MEM, diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c index 32953cca108c..03099bc570bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -39,7 +39,7 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) struct rep_event *msg; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); @@ -114,10 +114,10 @@ int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) struct rep_event *req; int pf; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - pf = rvu_get_pf(rvu->rep_pcifunc); + pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc); mutex_lock(&rvu->mbox_lock); req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); @@ -325,7 +325,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_get_nix_blkaddr(rvu, pcifunc); rep = true; for (i = 0; i < 2; i++) { @@ -345,8 +345,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << RVU_PFVF_PF_SHIFT | - ((vf + 1) & RVU_PFVF_FUNC_MASK); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1); rvu_get_nix_blkaddr(rvu, pcifunc); /* Skip installimg rules if nixlf is not attached */ @@ -454,7 +453,7 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu->rep2pfvf_map[rep] = pcifunc; rsp->rep_pf_map[rep] = pcifunc; rep++; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index 38cfe148f4b7..e4a5f9fa6fd4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -17,9 +17,9 @@ /* SDP PF number */ static int sdp_pf_num[MAX_SDP] = {-1, -1}; -bool is_sdp_pfvf(u16 pcifunc) +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc) { - u16 pf = rvu_get_pf(pcifunc); + u16 pf = rvu_get_pf(rvu->pdev, pcifunc); u32 found = 0, i = 0; while (i < MAX_SDP) { @@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc) return true; } -bool is_sdp_pf(u16 pcifunc) +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc) { - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !(pcifunc & RVU_PFVF_FUNC_MASK)); } @@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc) if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) return (rvu->vf_devid == RVU_SDP_VF_DEVID); - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 77ac94cb2ec4..0596a3ac4c12 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -33,7 +33,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, BLKADDR_APR = 0x16ULL, - BLK_COUNT = 0x17ULL, + BLKADDR_MBOX = 0x1bULL, + BLK_COUNT = 0x1cULL, }; /* RVU Block Type Enumeration */ @@ -49,7 +50,8 @@ enum 
rvu_block_type_e { BLKTYPE_TIM = 0x8, BLKTYPE_CPT = 0x9, BLKTYPE_NDC = 0xa, - BLKTYPE_MAX = 0xa, + BLKTYPE_MBOX = 0x13, + BLKTYPE_MAX = 0x13, }; /* RVU Admin function Interrupt Vector Enumeration */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 268efb7c1c15..49ce38685a7e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); /* rvu_get_nix_blkaddr sets up the corresponding NIX block * address and NIX RX and TX interfaces for a pcifunc. * Generally it is called during attach call of a pcifunc but it @@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_get_nix_blkaddr(rvu, pcifunc); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); @@ -236,7 +236,7 @@ void rvu_switch_disable(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, @@ -248,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 69e0778f9ac1..883e9f4d601c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ + otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o rvu_nicvf-y := otx2_vf.o rvu_rep-y := rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 7f6a435ac680..bec7d5b4d7cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -14,6 +14,7 @@ static struct dev_hw_ops otx2_hw_ops = { .sqe_flush = otx2_sqe_flush, .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -21,8 +22,20 @@ static struct dev_hw_ops cn10k_hw_ops = { .sqe_flush = cn10k_sqe_flush, .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; +void otx2_init_hw_ops(struct otx2_nic *pfvf) +{ + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; + return; + } + + pfvf->hw_ops = &cn10k_hw_ops; +} +EXPORT_SYMBOL(otx2_init_hw_ops); + int cn10k_lmtst_init(struct otx2_nic *pfvf) { @@ -30,12 +43,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) struct otx2_lmt_info *lmt_info; int err, cpu; - if 
(!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { - pfvf->hw_ops = &otx2_hw_ops; + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) return 0; - } - pfvf->hw_ops = &cn10k_hw_ops; /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE); pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index e3f0bce9908f..945ab10bd4ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf); int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, u32 burst, u64 rate, bool pps); int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf); +void otx2_init_hw_ops(struct otx2_nic *pfvf); #endif /* CN10K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index a6500e3673f2..c691f0722154 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -481,7 +481,7 @@ static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) goto set_available; /* Trigger CTX flush to write dirty data back to DRAM */ - reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7); otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); set_available: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h index 9965df0faa3e..43fbce0d6039 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -220,7 +220,7 @@ struct cpt_sg_s { #define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) /* CPT LF CTX Flush Register */ -#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) +#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0) #ifdef CONFIG_XFRM_OFFLOAD int cn10k_ipsec_init(struct net_device *netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c new file mode 100644 index 000000000000..ec8cde98076d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include "otx2_common.h" +#include "otx2_reg.h" +#include "otx2_struct.h" +#include "cn10k.h" + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); +/* CN20K mbox AF => PFx irq handler */ +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = pf_irq; + struct mbox *mw = &pf->mbox; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 pf_trig_val; + + pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL; + + /* Clear the IRQ */ + otx2_write64(pf, RVU_PF_INT, pf_trig_val); + + if (pf_trig_val & BIT_ULL(0)) { + mbox = &mw->mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_up_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", + BIT_ULL(0)); + } + + if (pf_trig_val & BIT_ULL(1)) { + mbox = &mw->mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_wrk); + trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", + BIT_ULL(1)); + } + + return IRQ_HANDLED; +} + +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq) +{ + struct otx2_nic *vf = vf_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 vf_trig_val; + + vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL; + /* Clear the IRQ */ + otx2_write64(vf, RVU_VF_INT, vf_trig_val); + + /* Read latest mbox data */ + smp_rmb(); + + if (vf_trig_val & BIT_ULL(1)) { + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF", + BIT_ULL(1)); + } + + if (vf_trig_val & BIT_ULL(0)) { + /* Check for PF => VF notification messages */ + mbox = &vf->mbox.mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF", + BIT_ULL(0)); + } + + return IRQ_HANDLED; +} + +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + /* Clear PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + + /* Enable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + if (numvfs > 64) { + numvfs -= 64; + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + } +} + +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + int vector, intr_vec, vec = 0; + + /* Disable PF <=> VF mailbox IRQ */ + otx2_write64(pf, 
RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull); + + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + + if (numvfs > 64) { + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + } + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + vector = pci_irq_vector(pf->pdev, intr_vec); + free_irq(vector, pf->hw.pfvf_irq_devid[vec]); + } +} + +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct pf_irq_data *irq_data = pf_irq; + struct otx2_nic *pf = irq_data->pf; + struct mbox *mbox; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = otx2_read64(pf, irq_data->intr_status); + otx2_write64(pf, irq_data->intr_status, intr); + mbox = pf->mbox_pfvf; + + if (intr) + trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr); + + irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start, + irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + struct otx2_hw *hw = &pf->hw; + struct pf_irq_data *irq_data; + int intr_vec, ret, vec = 0; + char *irq_name; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(pf->dev, 4, + sizeof(struct pf_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].pf = pf; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[intr_vec * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev, + pf->pcifunc), vec / 2, vec % 2); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d", + vec / 2, vec % 2); + + hw->pfvf_irq_devid[vec] = &irq_data[vec]; + ret = request_irq(pci_irq_vector(pf->pdev, intr_vec), + pf->hw_ops->pfvf_mbox_intr_handler, 0, + irq_name, + &irq_data[vec]); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); + return ret; + } + } + + cn20k_enable_pfvf_mbox_intr(pf, numvfs); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h new file mode 100644 index 000000000000..832adaf8c57f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef CN20K_H +#define CN20K_H + +#include "otx2_common.h" + +void cn20k_init(struct otx2_nic *pfvf); +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +#endif /* CN20K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 6f572589f1e5..9a10396e7504 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -28,12 +28,12 @@ static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -41,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -860,9 +860,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) { int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { sq = &pfvf->qset.sq[qidx]; if (!sq->sqb_ptrs) @@ -1822,7 +1823,7 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) req->chan_cnt = IEEE_8021QAZ_MAX_TCS; req->bpid_per_chan = 1; } else { - req->chan_cnt = 1; + req->chan_cnt = pfvf->hw.rx_chan_cnt; req->bpid_per_chan = 0; } @@ -1847,7 +1848,7 @@ int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) req->chan_cnt = IEEE_8021QAZ_MAX_TCS; req->bpid_per_chan = 1; } else { - req->chan_cnt = 1; + req->chan_cnt = pfvf->hw.rx_chan_cnt; req->bpid_per_chan = 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index ca0e6ab12ceb..6b59881f78e0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -28,10 +28,12 @@ #include "otx2_reg.h" #include "otx2_txrx.h" #include "otx2_devlink.h" +#include <rvu.h> #include <rvu_trace.h> #include "qos.h" #include "rep.h" #include "cn10k_ipsec.h" +#include "cn20k.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -61,6 +63,12 @@ /* Number of segments per SG structure */ #define MAX_SEGS_PER_SG 3 +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq); +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq); + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -245,6 +253,7 @@ 
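The pointer conversions in the otx2_common.c hunks above — u64 * becoming void __iomem * — are type hygiene rather than behavior changes: otx2_atomic64_add() performs its atomic directly on a device register, and carrying the __iomem annotation end-to-end lets sparse flag any accidental plain dereference. A usage sketch in the same shape as the converted stats readers, assuming the kernel context of the surrounding hunks (struct otx2_nic, otx2_get_regaddr() and the NIX_LF_* op-register offsets as used above):

/* Mirrors otx2_nix_rq_op_stats() above: the queue index rides in the
 * upper 32 bits of the atomic operand, and the 64-bit return value is
 * that queue's octet count.
 */
static u64 sketch_read_rq_octs(struct otx2_nic *pfvf, int qidx)
{
	void __iomem *ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);

	return otx2_atomic64_add((u64)qidx << 32, ptr);
}

The otx2_common.h hunk just below makes the same shift on the fallback path, replacing the old statement-expression macro that dereferenced the pointer directly with a static inline that takes void __iomem * and returns 0 on non-LSE builds.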
struct otx2_hw { u16 nix_msixoff; /* Offset of NIX vectors */ char *irq_name; cpumask_var_t *affinity_mask; + struct pf_irq_data *pfvf_irq_devid[4]; /* Stats */ struct otx2_dev_stats dev_stats; @@ -366,6 +375,9 @@ struct dev_hw_ops { int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); + irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); }; #define CN10K_MCS_SA_PER_SC 4 @@ -433,6 +445,16 @@ struct cn10k_mcs_cfg { struct list_head rxsc_list; }; +struct pf_irq_data { + u64 intr_status; + void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw, + int first, int mdevs, u64 intr); + struct otx2_nic *pf; + int vec_num; + int start; + int mdevs; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -476,6 +498,7 @@ struct otx2_nic { struct mbox *mbox_pfvf; struct workqueue_struct *mbox_wq; struct workqueue_struct *mbox_pfvf_wq; + struct qmem *pfvf_mbox_addr; u8 total_vfs; u16 pcifunc; /* RVU PF_FUNC */ @@ -730,8 +753,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr) ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr)); } -static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) { + u64 __iomem *ptr = addr; u64 result; __asm__ volatile(".cpu generic+lse\n" @@ -744,7 +768,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) #else #define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr) -#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) + +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) +{ + return 0; +} #endif static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura, @@ -794,7 +822,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf) /* Alloc pointer from pool/aura */ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) { - u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); + void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); u64 incr = (u64)aura | BIT_ULL(63); return otx2_atomic64_add(incr, ptr); @@ -899,21 +927,11 @@ MBOX_UP_MCS_MESSAGES /* Time to wait before watchdog kicks off */ #define OTX2_TX_TIMEOUT (100 * HZ) -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF - static inline bool is_otx2_vf(u16 pcifunc) { return !!(pcifunc & RVU_PFVF_FUNC_MASK); } -static inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf, struct page *page, size_t offset, size_t size, @@ -1191,4 +1209,6 @@ dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, struct sk_buff *skb, int seg, int *len); void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx); +void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 45b8c9230184..9b7f847b9c22 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ 
-559,10 +559,13 @@ static int otx2_set_coalesce(struct net_device *netdev, return 0; } -static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_get_rss_hash_opts(struct net_device *dev, + struct ethtool_rxfh_fields *nfc) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + + rss = &pfvf->hw.rss_info; if (!(rss->flowkey_cfg & (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) @@ -609,12 +612,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, return 0; } -static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_set_rss_hash_opts(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; - u32 rss_cfg = rss->flowkey_cfg; + struct otx2_rss_info *rss; + u32 rss_cfg; + + rss = &pfvf->hw.rss_info; + rss_cfg = rss->flowkey_cfg; if (!rss->enable) { netdev_err(pfvf->netdev, @@ -743,8 +751,6 @@ static int otx2_get_rxnfc(struct net_device *dev, if (netif_running(dev) && ntuple) ret = otx2_get_all_flows(pfvf, nfc, rules); break; - case ETHTOOL_GRXFH: - return otx2_get_rss_hash_opts(pfvf, nfc); default: break; } @@ -759,9 +765,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) pfvf->flow_cfg->ntuple = ntuple; switch (nfc->cmd) { - case ETHTOOL_SRXFH: - ret = otx2_set_rss_hash_opts(pfvf, nfc); - break; case ETHTOOL_SRXCLSRLINS: if (netif_running(dev) && ntuple) ret = otx2_add_flow(pfvf, nfc); @@ -1329,6 +1332,8 @@ static const struct ethtool_ops otx2_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, .get_msglevel = otx2_get_msglevel, .set_msglevel = otx2_set_msglevel, .get_pauseparam = otx2_get_pauseparam, @@ -1442,6 +1447,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, .get_ringparam = otx2_get_ringparam, .set_ringparam = otx2_set_ringparam, .get_coalesce = otx2_get_coalesce, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index db7c466fdc39..4e2d1206e1b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -206,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register ME interrupt handler*/ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) { @@ -216,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register FLR interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = 
request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) { @@ -228,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) if (numvfs > 64) { irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFME1), otx2_pf_me_intr_handler, 0, irq_name, pf); @@ -238,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) } irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFFLR1), otx2_pf_flr_intr_handler, 0, irq_name, pf); @@ -294,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) return 0; } -static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, - int first, int mdevs, u64 intr) +void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -545,7 +547,7 @@ end: } } -static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); int vfs = pf->total_vfs; @@ -574,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) return IRQ_HANDLED; } +static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs) +{ + struct qmem *mbox_addr; + int err; + + err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE); + if (err) { + dev_err(pf->dev, "qmem alloc fail\n"); + return ERR_PTR(-ENOMEM); + } + + otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + pf->pfvf_mbox_addr = mbox_addr; + + return mbox_addr->base; +} + static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) { void __iomem *hwbase; @@ -595,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) if (!pf->mbox_pfvf_wq) return -ENOMEM; - /* On CN10K platform, PF <-> VF mailbox region follows after - * PF <-> AF mailbox region. + /* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF + * regions/offsets in RVU_PF_VF_MBOX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access PF/VF mailbox regions. */ - if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - MBOX_SIZE; - else - base = readq((void __iomem *)((u64)pf->reg_base + - RVU_PF_VF_BAR4_ADDR)); + if (is_cn20k(pf->pdev)) { + hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs); + } else { + /* On CN10K platform, PF <-> VF mailbox region follows after + * PF <-> AF mailbox region. 
+ */ + if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) + base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + + MBOX_SIZE; + else + base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR); - hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); - if (!hwbase) { - err = -ENOMEM; - goto free_wq; + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); + if (!hwbase) { + err = -ENOMEM; + goto free_wq; + } } mbox = &pf->mbox_pfvf[0]; @@ -632,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) return 0; free_iomem: - if (hwbase) + if (hwbase && !(is_cn20k(pf->pdev))) iounmap(hwbase); free_wq: destroy_workqueue(pf->mbox_pfvf_wq); @@ -651,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) pf->mbox_pfvf_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap(mbox->mbox.hwbase); + else + qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr); otx2_mbox_destroy(&mbox->mbox); } @@ -676,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { int vector; + if (is_cn20k(pf->pdev)) + return cn20k_disable_pfvf_mbox_intr(pf, numvfs); + /* Disable PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); @@ -697,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) char *irq_name; int err; + if (is_cn20k(pf->pdev)) + return cn20k_register_pfvf_mbox_intr(pf, numvfs); + /* Register MBOX0 interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), @@ -717,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); + "RVUPF%d_VF Mbox1", + rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); err = request_irq(pci_irq_vector(pf->pdev, @@ -1006,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) otx2_mbox_msg_send(mbox, 0); } -static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; struct mbox *mw = &pf->mbox; @@ -1064,10 +1099,18 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) void otx2_disable_mbox_intr(struct otx2_nic *pf) { - int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + int vector; /* Disable AF => PF mailbox IRQ */ - otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + } else { + vector = pci_irq_vector(pf->pdev, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, + BIT_ULL(0) | BIT_ULL(1)); + } free_irq(vector, pf); } EXPORT_SYMBOL(otx2_disable_mbox_intr); @@ -1080,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) int err; /* Register mailbox interrupt handler */ - irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); - err = 
request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), - otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); + if (!is_cn20k(pf->pdev)) { + irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } else { + irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX * + NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFAF mbox irq\n"); @@ -1093,8 +1150,14 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) /* Enable mailbox interrupt for msgs coming from AF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); - otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1)); + } if (!probe_af) return 0; @@ -1125,7 +1188,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) pf->mbox_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap((void __iomem *)mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); @@ -1145,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf) if (!pf->mbox_wq) return -ENOMEM; - /* Mailbox is a reserved memory (in RAM) region shared between - * admin function (i.e AF) and this PF, shouldn't be mapped as - * device memory to allow unaligned accesses. + /* For CN20K, AF allocates mbox memory in DRAM and writes PF + * regions/offsets in RVU_MBOX_AF_PFX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access AF/PF mailbox regions. */ - hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), - MBOX_SIZE); + if (is_cn20k(pf->pdev)) + hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX + + ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT); + else + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e AF) and this PF, shouldn't be mapped as + * device memory to allow unaligned accesses. 
+ */ + hwbase = ioremap_wc(pci_resource_start + (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE); if (!hwbase) { dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); err = -ENOMEM; @@ -1323,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; struct otx2_snd_queue *sq; - u64 val, *ptr; - u64 qidx = 0; + void __iomem *ptr; + u64 val, qidx = 0; /* CQ */ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { @@ -1972,7 +2043,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for QERR\n", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); goto err_disable_napi; } @@ -1990,7 +2061,7 @@ int otx2_open(struct net_device *netdev) if (name_len >= NAME_SIZE) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); err = -EINVAL; goto err_free_cints; } @@ -2001,7 +2072,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); goto err_free_cints; } vec++; @@ -2998,8 +3069,13 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) if (err) return err; - err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, - RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (!is_cn20k(pf->pdev)) + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + else + err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT, + RVU_MBOX_PF_INT_VEC_CNT, + PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", __func__, num_vec); @@ -3008,6 +3084,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) otx2_setup_dev_hw_settings(pf); + if (is_cn20k(pf->pdev)) + cn20k_init(pf); + else + otx2_init_hw_ops(pf); + /* Init PF <=> AF mailbox stuff */ err = otx2_pfaf_mbox_init(pf); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index e3aee6e36215..1cd576fd09c5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -44,6 +44,17 @@ #define RVU_PF_VF_MBOX_ADDR (0xC40) #define RVU_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + /* RVU VF registers */ #define RVU_VF_VFPF_MBOX0 (0x00000) #define RVU_VF_VFPF_MBOX1 (0x00008) @@ -58,6 +69,11 @@ #define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) #define RVU_VF_MBOX_REGION (0xC0000) +/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_PFX_FUNC_PFAF_MBOX (0x80000) +#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000) + #define RVU_FUNC_BLKADDR_SHIFT 20 #define RVU_FUNC_BLKADDR_MASK 0x1FULL @@ -138,39 +154,12 @@ #define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) 
<< 12) -/* NIX AF transmit scheduler registers */ -#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) -#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) -#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) -#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) -#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) -#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) -#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) -#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) -#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) -#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) -#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) -#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) -#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) -#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) -#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) -#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) -#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) -#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) -#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) -#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) -#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) -#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) -#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) -#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) -#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) -#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) -#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) -#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) - /* LMT LF registers */ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) #define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12) #define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400) +/* CN20K registers */ +#define RVU_PF_DISC (0x0) + #endif /* OTX2_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 9a226ca74425..5f80b23c5335 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -467,7 +467,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, target = act->dev; if (target->dev.parent) { priv = netdev_priv(target); - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + if (rvu_get_pf(nic->pdev, nic->pcifunc) != + rvu_get_pf(nic->pdev, priv->pcifunc)) { NL_SET_ERR_MSG_MOD(extack, "can't redirect to other pf/vf"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 8a8b598bd389..5589fccd370b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -240,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) /* Disable VF => PF mailbox IRQ */ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); + + if (is_cn20k(vf->pdev)) + otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1)); + free_irq(vector, vf); } @@ -252,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Register mailbox interrupt handler */ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); - err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), - otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc 
& + RVU_PFVF_FUNC_MASK) - 1)); + + if (!is_cn20k(vf->pdev)) { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + } else { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name, + vf); + } + if (err) { dev_err(vf->dev, "RVUPF: IRQ registration failed for VFAF mbox irq\n"); @@ -264,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Enable mailbox interrupt for msgs coming from PF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); - otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(vf->pdev)) { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) | + BIT_ULL(2) | BIT_ULL(3)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3)); + } if (!probe_pf) return 0; @@ -315,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) if (!vf->mbox_wq) return -ENOMEM; - if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { + /* For cn20k platform, VF mailbox region is in dram aliased from AF + * VF MBOX ADDR, MBOX is a separate RVU block. + */ + if (is_cn20k(vf->pdev)) { + hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX << + RVU_FUNC_BLKADDR_SHIFT); + } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { /* For cn10k platform, VF mailbox region is in its BAR2 * register space */ @@ -616,6 +642,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } otx2_setup_dev_hw_settings(vf); + + if (is_cn20k(vf->pdev)) + cn20k_init(vf); + else + otx2_init_hw_ops(vf); + /* Init VF <=> PF mailbox stuff */ err = otx2vf_vfaf_mbox_init(vf); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index 58d572ce08ef..2872adabc830 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) { int sqe_tail, sqe_head; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); incr = (u64)qidx << 32; val = otx2_atomic64_add(incr, ptr); sqe_head = (val >> 20) & 0x3F; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 2cd3da3b6843..25af98034e2e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -244,10 +244,10 @@ static int rvu_rep_devlink_port_register(struct rep_dev *rep) if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc); } else { attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc); attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; } @@ -672,7 +672,8 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) rep->pcifunc = pcifunc; snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", - 
rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + rvu_get_pf(priv->pdev, pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK)); ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 752a72499b4f..be80da03a594 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -290,9 +290,6 @@ static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct ieee_ets *my_ets = &priv->ets; - if (!my_ets) - return -EINVAL; - ets->ets_cap = IEEE_8021QAZ_MAX_TCS; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 281b34af0bb4..d2071aff7b8f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2670,8 +2670,7 @@ static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table) static const struct udp_tunnel_nic_info mlx4_udp_tunnels = { .sync_table = mlx4_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index febeadfdd5a5..03d2fc7d9b09 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -49,6 +49,8 @@ #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> +#include <rdma/ib_verbs.h> + #include "mlx4.h" #include "fw.h" #include "icm.h" @@ -1246,14 +1248,6 @@ err_out: return err ? 
err : count; } -enum ibta_mtu { - IB_MTU_256 = 1, - IB_MTU_512 = 2, - IB_MTU_1024 = 3, - IB_MTU_2048 = 4, - IB_MTU_4096 = 5 -}; - static inline int int_to_ibta_mtu(int mtu) { switch (mtu) { @@ -1266,7 +1260,7 @@ static inline int int_to_ibta_mtu(int mtu) } } -static inline int ibta_mtu_to_int(enum ibta_mtu mtu) +static inline int ibta_mtu_to_int(enum ib_mtu mtu) { switch (mtu) { case IB_MTU_256: return 256; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 73cd74644378..42218834183a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -35,6 +35,55 @@ static u16 mlx5_fw_ver_subminor(u32 version) return version & 0xffff; } +static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct pci_dev *pdev = dev->pdev; + unsigned int vpd_size, kw_len; + char *str, *end; + u8 *vpd_data; + int err = 0; + int start; + + vpd_data = pci_vpd_alloc(pdev, &vpd_size); + if (IS_ERR(vpd_data)) + return 0; + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start >= 0) { + str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL); + if (!str) { + err = -ENOMEM; + goto end; + } + end = strchrnul(str, ' '); + *end = '\0'; + err = devlink_info_board_serial_number_put(req, str); + kfree(str); + if (err) + goto end; + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len); + if (start >= 0) { + str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL); + if (!str) { + err = -ENOMEM; + goto end; + } + err = devlink_info_serial_number_put(req, str); + kfree(str); + if (err) + goto end; + } + +end: + kfree(vpd_data); + return err; +} + #define DEVLINK_FW_STRING_LEN 32 static int @@ -49,6 +98,10 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, if (!mlx5_core_is_pf(dev)) return 0; + err = mlx5_devlink_serial_numbers_put(dev, req, extack); + if (err) + return err; + err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 5b0d03b3efe8..65a73913b9a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -278,10 +278,6 @@ enum packet_merge { struct mlx5e_packet_merge_param { enum packet_merge type; u32 timeout; - struct { - u8 match_criteria_type; - u8 alignment_granularity; - } shampo; }; struct mlx5e_params { @@ -557,7 +553,7 @@ struct mlx5e_icosq { } ____cacheline_aligned_in_smp; struct mlx5e_frag_page { - struct page *page; + netmem_ref netmem; u16 frags; }; @@ -638,7 +634,6 @@ struct mlx5e_shampo_hd { struct mlx5e_frag_page *pages; u32 hd_per_wq; u16 hd_per_wqe; - u16 pages_per_wq; unsigned long *bitmap; u16 pi; u16 ci; @@ -721,7 +716,11 @@ struct mlx5e_rq { struct bpf_prog __rcu *xdp_prog; struct mlx5e_xdpsq *xdpsq; DECLARE_BITMAP(flags, 8); + + /* page pools */ struct page_pool *page_pool; + struct page_pool *hd_page_pool; + struct mlx5e_xdp_buff mxbuf; /* AF_XDP zero-copy */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 58ec5e44aa7a..fc945bce933a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -901,6 +901,7 @@ int mlx5e_build_rq_param(struct 
mlx5_core_dev *mdev, { void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + u32 lro_timeout; int ndsegs = 1; int err; @@ -926,22 +927,25 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, MLX5_SET(wq, wq, log_wqe_stride_size, log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk)); - if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { - MLX5_SET(wq, wq, shampo_enable, true); - MLX5_SET(wq, wq, log_reservation_size, - mlx5e_shampo_get_log_rsrv_size(mdev, params)); - MLX5_SET(wq, wq, - log_max_num_of_packets_per_reservation, - mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); - MLX5_SET(wq, wq, log_headers_entry_size, - mlx5e_shampo_get_log_hd_entry_size(mdev, params)); - MLX5_SET(rqc, rqc, reservation_timeout, - mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT)); - MLX5_SET(rqc, rqc, shampo_match_criteria_type, - params->packet_merge.shampo.match_criteria_type); - MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity, - params->packet_merge.shampo.alignment_granularity); - } + if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) + break; + + MLX5_SET(wq, wq, shampo_enable, true); + MLX5_SET(wq, wq, log_reservation_size, + mlx5e_shampo_get_log_rsrv_size(mdev, params)); + MLX5_SET(wq, wq, + log_max_num_of_packets_per_reservation, + mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); + MLX5_SET(wq, wq, log_headers_entry_size, + mlx5e_shampo_get_log_hd_entry_size(mdev, params)); + lro_timeout = + mlx5e_choose_lro_timeout(mdev, + MLX5E_DEFAULT_SHAMPO_TIMEOUT); + MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout); + MLX5_SET(rqc, rqc, shampo_match_criteria_type, + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED); + MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity, + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE); break; } default: /* MLX5_WQ_TYPE_CYCLIC */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index e837c21d3d21..6501252359b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -362,7 +362,8 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE); break; case MLX5E_DMA_MAP_PAGE: - dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE); + netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size, + DMA_TO_DEVICE, 0); break; default: WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3cb8d3bf9044..35479cbf98d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -32,6 +32,7 @@ #include <linux/dim.h> #include <linux/ethtool_netlink.h> +#include <net/netdev_queues.h> #include "en.h" #include "en/channels.h" @@ -365,11 +366,6 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames; param->tx_pending = 1 << priv->channels.params.log_sq_size; - - kernel_param->tcp_data_split = - (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ? 
- ETHTOOL_TCP_DATA_SPLIT_ENABLED : - ETHTOOL_TCP_DATA_SPLIT_DISABLED; } static void mlx5e_get_ringparam(struct net_device *dev, @@ -382,6 +378,27 @@ static void mlx5e_get_ringparam(struct net_device *dev, mlx5e_ethtool_get_ringparam(priv, param, kernel_param); } +static bool mlx5e_ethtool_set_tcp_data_split(struct mlx5e_priv *priv, + u8 tcp_data_split, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->netdev; + + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + !(dev->features & NETIF_F_GRO_HW)) { + NL_SET_ERR_MSG_MOD(extack, + "TCP-data-split is not supported when GRO HW is disabled"); + return false; + } + + /* Might need to disable HW-GRO if it was kept on due to hds. */ + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && + dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED) + netdev_update_features(priv->netdev); + + return true; +} + int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param, struct netlink_ext_ack *extack) @@ -440,6 +457,11 @@ static int mlx5e_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); + if (!mlx5e_ethtool_set_tcp_data_split(priv, + kernel_param->tcp_data_split, + extack)) + return -EINVAL; + return mlx5e_ethtool_set_ringparam(priv, param, extack); } @@ -2616,12 +2638,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev, const struct ethtool_ops mlx5e_ethtool_ops = { .cap_link_lanes_supported = true, .cap_rss_ctx_supported = true, + .rxfh_per_ctx_fields = true, .rxfh_per_ctx_key = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_USE_CQE, .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ext_state = mlx5e_get_link_ext_state, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ea822c69d137..dca5ca51a470 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -41,6 +41,7 @@ #include <linux/filter.h> #include <net/netdev_lock.h> #include <net/netdev_queues.h> +#include <net/netdev_rx_queue.h> #include <net/page_pool/types.h> #include <net/pkt_sched.h> #include <net/xdp_sock_drv.h> @@ -78,7 +79,8 @@ static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev) { - if (!MLX5_CAP_GEN(mdev, shampo)) + if (!MLX5_CAP_GEN(mdev, shampo) || + !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge)) return false; /* Our HW-GRO implementation relies on "KSM Mkey" for @@ -331,47 +333,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } -static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node) -{ - rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), - GFP_KERNEL, node); - if (!rq->mpwqe.shampo) - return -ENOMEM; - return 0; -} - -static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq) -{ - kvfree(rq->mpwqe.shampo); -} - -static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) -{ - struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - - shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, - node); - shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq, - sizeof(*shampo->pages)), - GFP_KERNEL, node); - if (!shampo->bitmap || !shampo->pages) - goto err_nomem; - - return 0; - -err_nomem: - 
bitmap_free(shampo->bitmap); - kvfree(shampo->pages); - - return -ENOMEM; -} - -static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) -{ - bitmap_free(rq->mpwqe.shampo->bitmap); - kvfree(rq->mpwqe.shampo->pages); -} - static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); @@ -584,19 +545,18 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq } static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, - struct mlx5e_rq *rq) + u16 hd_per_wq, u32 *umr_mkey) { u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); - if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) { + if (max_ksm_size < hd_per_wq) { mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", - max_ksm_size, rq->mpwqe.shampo->hd_per_wq); + max_ksm_size, hd_per_wq); return -EINVAL; } - - return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + return mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq, MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE, - &rq->mpwqe.shampo->mkey); + umr_mkey); } static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) @@ -758,6 +718,42 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param xdp_frag_size); } +static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq, + int node) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + + shampo->hd_per_wq = hd_per_wq; + + shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node); + shampo->pages = kvzalloc_node(array_size(hd_per_wq, + sizeof(*shampo->pages)), + GFP_KERNEL, node); + if (!shampo->bitmap || !shampo->pages) + goto err_nomem; + + return 0; + +err_nomem: + kvfree(shampo->pages); + bitmap_free(shampo->bitmap); + + return -ENOMEM; +} + +static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) +{ + kvfree(rq->mpwqe.shampo->pages); + bitmap_free(rq->mpwqe.shampo->bitmap); +} + +static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq) +{ + struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix); + + return !!rxq->mp_params.mp_ops; +} + static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, @@ -765,42 +761,81 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, u32 *pool_size, int node) { + void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); + u32 hd_pool_size; + u16 hd_per_wq; + int wq_size; int err; if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) return 0; - err = mlx5e_rq_shampo_hd_alloc(rq, node); - if (err) - goto out; - rq->mpwqe.shampo->hd_per_wq = - mlx5e_shampo_hd_per_wq(mdev, params, rqp); - err = mlx5e_create_rq_hd_umr_mkey(mdev, rq); + + rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), + GFP_KERNEL, node); + if (!rq->mpwqe.shampo) + return -ENOMEM; + + /* split headers data structures */ + hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp); + err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node); if (err) - goto err_shampo_hd; - err = mlx5e_rq_shampo_hd_info_alloc(rq, node); + goto err_shampo_hd_info_alloc; + + err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq, + &rq->mpwqe.shampo->mkey); if (err) - goto err_shampo_info; + goto err_umr_mkey; + + rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey); + rq->mpwqe.shampo->hd_per_wqe = + mlx5e_shampo_hd_per_wqe(mdev, params, rqp); + wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); + hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) / + 
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + + if (mlx5_rq_needs_separate_hd_pool(rq)) { + /* Separate page pool for shampo headers */ + struct page_pool_params pp_params = { }; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = hd_pool_size; + pp_params.nid = node; + pp_params.dev = rq->pdev; + pp_params.napi = rq->cq.napi; + pp_params.netdev = rq->netdev; + pp_params.dma_dir = rq->buff.map_dir; + pp_params.max_len = PAGE_SIZE; + + rq->hd_page_pool = page_pool_create(&pp_params); + if (IS_ERR(rq->hd_page_pool)) { + err = PTR_ERR(rq->hd_page_pool); + rq->hd_page_pool = NULL; + goto err_hds_page_pool; + } + } else { + /* Common page pool, reserve space for headers. */ + *pool_size += hd_pool_size; + rq->hd_page_pool = NULL; + } + + /* gro only data structures */ rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node); if (!rq->hw_gro_data) { err = -ENOMEM; goto err_hw_gro_data; } - rq->mpwqe.shampo->key = - cpu_to_be32(rq->mpwqe.shampo->mkey); - rq->mpwqe.shampo->hd_per_wqe = - mlx5e_shampo_hd_per_wqe(mdev, params, rqp); - rq->mpwqe.shampo->pages_per_wq = - rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; - *pool_size += rq->mpwqe.shampo->pages_per_wq; + return 0; err_hw_gro_data: - mlx5e_rq_shampo_hd_info_free(rq); -err_shampo_info: + page_pool_destroy(rq->hd_page_pool); +err_hds_page_pool: mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey); -err_shampo_hd: - mlx5e_rq_shampo_hd_free(rq); -out: +err_umr_mkey: + mlx5e_rq_shampo_hd_info_free(rq); +err_shampo_hd_info_alloc: + kvfree(rq->mpwqe.shampo); return err; } @@ -810,9 +845,11 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq) return; kvfree(rq->hw_gro_data); + if (rq->hd_page_pool != rq->page_pool) + page_pool_destroy(rq->hd_page_pool); mlx5e_rq_shampo_hd_info_free(rq); mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey); - mlx5e_rq_shampo_hd_free(rq); + kvfree(rq->mpwqe.shampo); } static int mlx5e_alloc_rq(struct mlx5e_params *params, @@ -929,6 +966,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, pp_params.netdev = rq->netdev; pp_params.dma_dir = rq->buff.map_dir; pp_params.max_len = PAGE_SIZE; + pp_params.queue_idx = rq->ix; + + /* Shampo header data split allow for unreadable netmem */ + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM; /* page_pool can be used even when there is no rq->xdp_prog, * given page_pool does not handle DMA mapping there is no @@ -941,6 +983,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, rq->page_pool = NULL; goto err_free_by_rq_type; } + if (!rq->hd_page_pool) + rq->hd_page_pool = rq->page_pool; if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); @@ -4043,10 +4087,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable) if (enable) { new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO; - new_params.packet_merge.shampo.match_criteria_type = - MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED; - new_params.packet_merge.shampo.alignment_granularity = - MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE; } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE; } else { @@ -4373,6 +4413,7 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { + struct 
netdev_config *cfg = netdev->cfg_pending; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_vlan_table *vlan; struct mlx5e_params *params; @@ -4439,6 +4480,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, } } + /* The header-data split ring param requires HW GRO to stay enabled. */ + if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + !(features & NETIF_F_GRO_HW)) { + netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n"); + features |= NETIF_F_GRO_HW; + } + if (mlx5e_is_uplink_rep(priv)) { features = mlx5e_fix_uplink_rep_features(netdev, features); netdev->netns_immutable = true; @@ -5303,8 +5351,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) priv->nic_info.set_port = mlx5e_vxlan_set_port; priv->nic_info.unset_port = mlx5e_vxlan_unset_port; - priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN; + priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN; priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; /* Don't count the space hard-coded to the IANA port */ priv->nic_info.tables[0].n_entries = @@ -5454,6 +5501,103 @@ static const struct netdev_stat_ops mlx5e_stat_ops = { .get_base_stats = mlx5e_get_base_stats, }; +struct mlx5_qmgmt_data { + struct mlx5e_channel *c; + struct mlx5e_channel_param cparam; +}; + +static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq, + int queue_index) +{ + struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq; + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channels *chs = &priv->channels; + struct mlx5e_params params = chs->params; + struct mlx5_core_dev *mdev; + int err; + + mutex_lock(&priv->state_lock); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + err = -ENODEV; + goto unlock; + } + + if (queue_index >= chs->num) { + err = -ERANGE; + goto unlock; + } + + if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || + chs->params.ptp_rx || + chs->params.xdp_prog || + priv->htb) { + netdev_err(priv->netdev, + "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n"); + err = -EOPNOTSUPP; + goto unlock; + } + + mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, queue_index); + err = mlx5e_build_channel_param(mdev, ¶ms, &new->cparam); + if (err) + goto unlock; + + err = mlx5e_open_channel(priv, queue_index, ¶ms, NULL, &new->c); +unlock: + mutex_unlock(&priv->state_lock); + return err; +} + +static void mlx5e_queue_mem_free(struct net_device *dev, void *mem) +{ + struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem; + + /* not supposed to happen since mlx5e_queue_start never fails + * but this is how this should be implemented just in case + */ + if (data->c) + mlx5e_close_channel(data->c); +} + +static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index) +{ + /* In mlx5 a txq cannot be simply stopped in isolation, only restarted. + * mlx5e_queue_start does not fail, we stop the old queue there. + * TODO: Improve this. 
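+	 *
+	 * For illustration only, a hypothetical self-contained stop (not
+	 * what this driver does: mlx5 defers the teardown to
+	 * mlx5e_queue_start so the old and the new channel never share a
+	 * NAPI instance) would read:
+	 *
+	 *	old = priv->channels.c[queue_index];
+	 *	mlx5e_deactivate_priv_channels(priv);
+	 *	mlx5e_close_channel(old);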
+ */ + return 0; +} + +static int mlx5e_queue_start(struct net_device *dev, void *newq, + int queue_index) +{ + struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq; + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channel *old; + + mutex_lock(&priv->state_lock); + + /* stop and close the old */ + old = priv->channels.c[queue_index]; + mlx5e_deactivate_priv_channels(priv); + /* close old before activating new, to avoid napi conflict */ + mlx5e_close_channel(old); + + /* start the new */ + priv->channels.c[queue_index] = new->c; + mlx5e_activate_priv_channels(priv); + mutex_unlock(&priv->state_lock); + return 0; +} + +static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = { + .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data), + .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc, + .ndo_queue_mem_free = mlx5e_queue_mem_free, + .ndo_queue_start = mlx5e_queue_start, + .ndo_queue_stop = mlx5e_queue_stop, +}; + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -5464,6 +5608,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops; + netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops; netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops; netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops; netdev->request_ops_lock = true; @@ -5506,17 +5651,17 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) MLX5E_MPWRQ_UMR_MODE_ALIGNED)) netdev->vlan_features |= NETIF_F_LRO; + if (mlx5e_hw_gro_supported(mdev) && + mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, + MLX5E_MPWRQ_UMR_MODE_ALIGNED)) + netdev->vlan_features |= NETIF_F_GRO_HW; + netdev->hw_features = netdev->vlan_features; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - if (mlx5e_hw_gro_supported(mdev) && - mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, - MLX5E_MPWRQ_UMR_MODE_ALIGNED)) - netdev->hw_features |= NETIF_F_GRO_HW; - if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -5595,6 +5740,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netmem_tx = true; + netif_set_tso_max_size(netdev, GSO_MAX_SIZE); mlx5e_set_xdp_feature(netdev); mlx5e_set_netdev_dev_addr(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 84b1ab8233b8..2bb32082bfcc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -273,33 +273,32 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64) -static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq, +static int mlx5e_page_alloc_fragmented(struct page_pool *pp, struct mlx5e_frag_page *frag_page) { - struct page *page; + netmem_ref netmem = page_pool_dev_alloc_netmems(pp); - page = page_pool_dev_alloc_pages(rq->page_pool); - if (unlikely(!page)) + if (unlikely(!netmem)) return -ENOMEM; - page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX); + page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX); *frag_page = (struct mlx5e_frag_page) { - .page = page, + .netmem = netmem, .frags = 0, }; return 0; } -static void 
mlx5e_page_release_fragmented(struct mlx5e_rq *rq, +static void mlx5e_page_release_fragmented(struct page_pool *pp, struct mlx5e_frag_page *frag_page) { u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags; - struct page *page = frag_page->page; + netmem_ref netmem = frag_page->netmem; - if (page_pool_unref_page(page, drain_count) == 0) - page_pool_put_unrefed_page(rq->page_pool, page, -1, true); + if (page_pool_unref_netmem(netmem, drain_count) == 0) + page_pool_put_unrefed_netmem(pp, netmem, -1, true); } static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, @@ -313,7 +312,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, * offset) should just use the new one without replenishing again * by themselves. */ - err = mlx5e_page_alloc_fragmented(rq, frag->frag_page); + err = mlx5e_page_alloc_fragmented(rq->page_pool, + frag->frag_page); return err; } @@ -332,7 +332,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *frag) { if (mlx5e_frag_can_release(frag)) - mlx5e_page_release_fragmented(rq, frag->frag_page); + mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page); } static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) @@ -358,7 +358,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE); headroom = i == 0 ? rq->buff.headroom : 0; - addr = page_pool_get_dma_addr(frag->frag_page->page); + addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem); wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom); } @@ -499,9 +499,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page, u32 frag_offset, u32 len) { + netmem_ref netmem = frag_page->netmem; skb_frag_t *frag; - dma_addr_t addr = page_pool_get_dma_addr(frag_page->page); + dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem); dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir); if (!xdp_buff_has_frags(xdp)) { @@ -514,9 +515,9 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf } frag = &sinfo->frags[sinfo->nr_frags++]; - skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len); + skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len); - if (page_is_pfmemalloc(frag_page->page)) + if (netmem_is_pfmemalloc(netmem)) xdp_buff_set_frag_pfmemalloc(xdp); sinfo->xdp_frags_size += len; } @@ -527,27 +528,29 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, u32 frag_offset, u32 len, unsigned int truesize) { - dma_addr_t addr = page_pool_get_dma_addr(frag_page->page); + dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem); u8 next_frag = skb_shinfo(skb)->nr_frags; + netmem_ref netmem = frag_page->netmem; dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir); - if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) { + if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) { skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize); - } else { - frag_page->frags++; - skb_add_rx_frag(skb, next_frag, frag_page->page, - frag_offset, len, truesize); + return; } + + frag_page->frags++; + skb_add_rx_frag_netmem(skb, next_frag, netmem, + frag_offset, len, truesize); } static inline void mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb, - struct page *page, dma_addr_t addr, + netmem_ref netmem, dma_addr_t addr, int offset_from, int 
dma_offset, u32 headlen) { - const void *from = page_address(page) + offset_from; + const void *from = netmem_address(netmem) + offset_from; /* Aligning len to sizeof(long) optimizes memcpy performance */ unsigned int len = ALIGN(headlen, sizeof(long)); @@ -584,7 +587,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) struct mlx5e_frag_page *frag_page; frag_page = &wi->alloc_units.frag_pages[i]; - mlx5e_page_release_fragmented(rq, frag_page); + mlx5e_page_release_fragmented(rq->page_pool, + frag_page); } } } @@ -679,12 +683,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); u64 addr; - err = mlx5e_page_alloc_fragmented(rq, frag_page); + err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page); if (unlikely(err)) goto err_unmap; - - addr = page_pool_get_dma_addr(frag_page->page); + addr = page_pool_get_dma_addr_netmem(frag_page->netmem); for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { header_offset = mlx5e_shampo_hd_offset(index++); @@ -715,7 +718,8 @@ err_unmap: if (!header_offset) { struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); - mlx5e_page_release_fragmented(rq, frag_page); + mlx5e_page_release_fragmented(rq->hd_page_pool, + frag_page); } } @@ -791,10 +795,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) { dma_addr_t addr; - err = mlx5e_page_alloc_fragmented(rq, frag_page); + err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page); if (unlikely(err)) goto err_unmap; - addr = page_pool_get_dma_addr(frag_page->page); + + addr = page_pool_get_dma_addr_netmem(frag_page->netmem); umr_wqe->inline_mtts[i] = (struct mlx5_mtt) { .ptag = cpu_to_be64(addr | MLX5_EN_WR), }; @@ -836,7 +841,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) err_unmap: while (--i >= 0) { frag_page--; - mlx5e_page_release_fragmented(rq, frag_page); + mlx5e_page_release_fragmented(rq->page_pool, frag_page); } bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); @@ -855,7 +860,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - mlx5e_page_release_fragmented(rq, frag_page); + mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page); } clear_bit(header_index, shampo->bitmap); } @@ -1100,6 +1105,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (rq->page_pool) page_pool_nid_changed(rq->page_pool, numa_mem_id()); + if (rq->hd_page_pool) + page_pool_nid_changed(rq->hd_page_pool, numa_mem_id()); head = rq->mpwqe.actual_wq_head; i = missing; @@ -1212,7 +1219,7 @@ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; - return page_address(frag_page->page) + head_offset; + return netmem_address(frag_page->netmem) + head_offset; } static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) @@ -1673,11 +1680,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, dma_addr_t addr; u32 frag_size; - va = page_address(frag_page->page) + wi->offset; + va = netmem_address(frag_page->netmem) + wi->offset; data = va + rx_headroom; 
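	/* A sketch of the page -> netmem conversion pattern applied
	 * throughout this file (names taken from this hunk; behavior
	 * assumed equivalent to the old struct page helpers):
	 *
	 *	netmem_ref nm = frag_page->netmem;
	 *	void *va = netmem_address(nm);  returns NULL for net_iov
	 *	dma_addr_t pa = page_pool_get_dma_addr_netmem(nm);
	 *
	 * netmem_address() returning NULL for unreadable device memory is
	 * why the SHAMPO path below drops packets whose header lands in a
	 * net_iov.
	 */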
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); - addr = page_pool_get_dma_addr(frag_page->page); + addr = page_pool_get_dma_addr_netmem(frag_page->netmem); dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, frag_size, rq->buff.map_dir); net_prefetch(data); @@ -1727,10 +1734,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi frag_page = wi->frag_page; - va = page_address(frag_page->page) + wi->offset; + va = netmem_address(frag_page->netmem) + wi->offset; frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); - addr = page_pool_get_dma_addr(frag_page->page); + addr = page_pool_get_dma_addr_netmem(frag_page->netmem); dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, rq->buff.frame0_sz, rq->buff.map_dir); net_prefetchw(va); /* xdp_frame data area */ @@ -2003,12 +2010,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w if (prog) { /* area for bpf_xdp_[store|load]_bytes */ - net_prefetchw(page_address(frag_page->page) + frag_offset); - if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) { + net_prefetchw(netmem_address(frag_page->netmem) + frag_offset); + if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool, + &wi->linear_page))) { rq->stats->buff_alloc_err++; return NULL; } - va = page_address(wi->linear_page.page); + + va = netmem_address(wi->linear_page.netmem); net_prefetchw(va); /* xdp_frame data area */ linear_hr = XDP_PACKET_HEADROOM; linear_data_len = 0; @@ -2068,7 +2077,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w wi->linear_page.frags++; } - mlx5e_page_release_fragmented(rq, &wi->linear_page); + mlx5e_page_release_fragmented(rq->page_pool, + &wi->linear_page); return NULL; /* page/packet was consumed by XDP */ } @@ -2077,13 +2087,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0, mxbuf->xdp.data - mxbuf->xdp.data_meta); if (unlikely(!skb)) { - mlx5e_page_release_fragmented(rq, &wi->linear_page); + mlx5e_page_release_fragmented(rq->page_pool, + &wi->linear_page); return NULL; } skb_mark_for_recycle(skb); wi->linear_page.frags++; - mlx5e_page_release_fragmented(rq, &wi->linear_page); + mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page); if (xdp_buff_has_frags(&mxbuf->xdp)) { struct mlx5e_frag_page *pagep; @@ -2117,8 +2128,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w while (++pagep < frag_page); } /* copy header */ - addr = page_pool_get_dma_addr(head_page->page); - mlx5e_copy_skb_header(rq, skb, head_page->page, addr, + addr = page_pool_get_dma_addr_netmem(head_page->netmem); + mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr, head_offset, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; @@ -2148,11 +2159,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return NULL; } - va = page_address(frag_page->page) + head_offset; + va = netmem_address(frag_page->netmem) + head_offset; data = va + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); - addr = page_pool_get_dma_addr(frag_page->page); + addr = page_pool_get_dma_addr_netmem(frag_page->netmem); dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset, frag_size, rq->buff.map_dir); net_prefetch(data); @@ -2191,16 +2202,19 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 
header_index) { struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page); u16 head_offset = mlx5e_shampo_hd_offset(header_index); - dma_addr_t dma_addr = page_dma_addr + head_offset; u16 head_size = cqe->shampo.header_size; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb = NULL; + dma_addr_t page_dma_addr; + dma_addr_t dma_addr; void *hdr, *data; u32 frag_size; - hdr = page_address(frag_page->page) + head_offset; + page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem); + dma_addr = page_dma_addr + head_offset; + + hdr = netmem_address(frag_page->netmem) + head_offset; data = hdr + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); @@ -2225,7 +2239,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } net_prefetchw(skb->data); - mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr, + mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part was allocated with headlen and aligned to long */ @@ -2319,11 +2333,23 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } if (!*skb) { - if (likely(head_size)) + if (likely(head_size)) { *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); - else - *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt, - data_offset, page_idx); + } else { + struct mlx5e_frag_page *frag_page; + + frag_page = &wi->alloc_units.frag_pages[page_idx]; + /* Drop packets with header in unreadable data area to + * prevent the kernel from touching it. + */ + if (unlikely(netmem_is_net_iov(frag_page->netmem))) + goto free_hd_entry; + *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, + cqe_bcnt, + data_offset, + page_idx); + } + if (unlikely(!*skb)) goto free_hd_entry; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index fb2e5b844c15..d76d7a945899 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev) priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX); phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0); - if (phy_irq < 0) { - dev_err(&pdev->dev, "Error getting PHY irq. 
Use polling instead"); + if (phy_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out; + } else if (phy_irq < 0) { phy_irq = PHY_POLL; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 36393a17d92d..1d8ff0cbe607 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -446,6 +446,26 @@ enum { #define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */ #define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */ #define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */ +#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_L \ + 0x04433 /* 0x110cc */ +#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_H \ + 0x04434 /* 0x110d0 */ +#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_L \ + 0x04435 /* 0x110d4 */ +#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_H \ + 0x04436 /* 0x110d8 */ +#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_L \ + 0x04437 /* 0x110dc */ +#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_H \ + 0x04438 /* 0x110e0 */ +#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_L \ + 0x04439 /* 0x110e4 */ +#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_H \ + 0x0443a /* 0x110e8 */ +#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_L \ + 0x0443b /* 0x110ec */ +#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_H \ + 0x0443c /* 0x110f0 */ #define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */ /* Precision Time Protocol Registers */ @@ -674,6 +694,26 @@ enum { #define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */ #define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */ +#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_L \ + 0x0855f /* 0x2157c */ +#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_H \ + 0x08560 /* 0x21580 */ +#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_L \ + 0x08561 /* 0x21584 */ +#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_H \ + 0x08562 /* 0x21588 */ +#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_L \ + 0x08563 /* 0x2158c */ +#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_H \ + 0x08564 /* 0x21590 */ +#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_L \ + 0x08565 /* 0x21594 */ +#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_H \ + 0x08566 /* 0x21598 */ +#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_L \ + 0x08567 /* 0x2159c */ +#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_H \ + 0x08568 /* 0x215a0 */ #define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */ /* RPC RAM Registers */ @@ -796,6 +836,46 @@ enum { #define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */ #define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */ #define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */ +#define FBNIC_MAC_STAT_RX_UNDERSIZE_L 0x11a24 /* 0x46890 */ +#define FBNIC_MAC_STAT_RX_UNDERSIZE_H 0x11a25 /* 0x46894 */ +#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_L \ + 0x11a26 /* 0x46898 */ +#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_H \ + 0x11a27 /* 0x4689c */ +#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_L \ + 0x11a28 /* 0x468a0 */ +#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_H \ + 0x11a29 /* 0x468a4 */ +#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_L \ + 0x11a2a /* 0x468a8 */ +#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_H \ + 0x11a2b /* 0x468ac */ +#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_L \ + 0x11a2c /* 0x468b0 */ +#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_H \ + 0x11a2d /* 0x468b4 */ +#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_L \ + 0x11a2e /* 0x468b8 */ +#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_H \ + 0x11a2f /* 0x468bc */ +#define 
FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_L \
+	0x11a30 /* 0x468c0 */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_H \
+	0x11a31 /* 0x468c4 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_L \
+	0x11a32 /* 0x468c8 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_H \
+	0x11a33 /* 0x468cc */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_L 0x11a34 /* 0x468d0 */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_H 0x11a35 /* 0x468d4 */
+#define FBNIC_MAC_STAT_RX_JABBER_L 0x11a36 /* 0x468d8 */
+#define FBNIC_MAC_STAT_RX_JABBER_H 0x11a37 /* 0x468dc */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_L 0x11a38 /* 0x468e0 */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_H 0x11a39 /* 0x468e4 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_L \
+	0x11a3c /* 0x468f0 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_H \
+	0x11a3d /* 0x468f4 */
 #define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
 #define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
 #define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
@@ -810,6 +890,38 @@ enum {
 #define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
 #define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
 #define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_L \
+	0x11a4e /* 0x46938 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_H \
+	0x11a4f /* 0x4693c */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_L \
+	0x11a50 /* 0x46940 */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_H \
+	0x11a51 /* 0x46944 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_L \
+	0x11a52 /* 0x46948 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_H \
+	0x11a53 /* 0x4694c */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_L \
+	0x11a54 /* 0x46950 */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_H \
+	0x11a55 /* 0x46954 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_L \
+	0x11a56 /* 0x46958 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_H \
+	0x11a57 /* 0x4695c */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_L \
+	0x11a58 /* 0x46960 */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_H \
+	0x11a59 /* 0x46964 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_L \
+	0x11a5a /* 0x46968 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_H \
+	0x11a5b /* 0x4696c */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_L \
+	0x11a5e /* 0x46978 */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_H \
+	0x11a5f /* 0x4697c */
 
 /* PCIE Comphy Registers */
 #define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 5c7556c8c4c5..4646e80c3462 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -531,20 +531,6 @@ static int fbnic_get_rss_hash_idx(u32 flow_type)
 	return -1;
 }
 
-static int
-fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
-{
-	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
-
-	if (hash_opt_idx < 0)
-		return -EINVAL;
-
-	/* Report options from rss_en table in fbn */
-	cmd->data = fbn->rss_flow_hash[hash_opt_idx];
-
-	return 0;
-}
-
 static int
 fbnic_get_cls_rule_all(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd,
 		       u32 *rule_locs)
@@ -779,9 +765,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
 		cmd->data = fbn->num_rx_queues;
 		ret = 0;
 		break;
-	case ETHTOOL_GRXFH:
-		ret = fbnic_get_rss_hash_opts(fbn, cmd);
-		break;
 	case ETHTOOL_GRXCLSRULE:
 		ret = fbnic_get_cls_rule(fbn, cmd);
 		break;
@@ -803,41 +786,6 @@ static int fbnic_get_rxnfc(struct
net_device *netdev, return ret; } -#define FBNIC_L2_HASH_OPTIONS \ - (RXH_L2DA | RXH_DISCARD) -#define FBNIC_L3_HASH_OPTIONS \ - (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST) -#define FBNIC_L4_HASH_OPTIONS \ - (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3) - -static int -fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd) -{ - int hash_opt_idx; - - /* Verify the type requested is correct */ - hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type); - if (hash_opt_idx < 0) - return -EINVAL; - - /* Verify the fields asked for can actually be assigned based on type */ - if (cmd->data & ~FBNIC_L4_HASH_OPTIONS || - (hash_opt_idx > FBNIC_L4_HASH_OPT && - cmd->data & ~FBNIC_L3_HASH_OPTIONS) || - (hash_opt_idx > FBNIC_IP_HASH_OPT && - cmd->data & ~FBNIC_L2_HASH_OPTIONS)) - return -EINVAL; - - fbn->rss_flow_hash[hash_opt_idx] = cmd->data; - - if (netif_running(fbn->netdev)) { - fbnic_rss_reinit(fbn->fbd, fbn); - fbnic_write_rules(fbn->fbd); - } - - return 0; -} - static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd) { int i; @@ -1244,9 +1192,6 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = fbnic_set_rss_hash_opts(fbn, cmd); - break; case ETHTOOL_SRXCLSRLINS: ret = fbnic_set_cls_rule_ins(fbn, cmd); break; @@ -1347,6 +1292,60 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, } static int +fbnic_get_rss_hash_opts(struct net_device *netdev, + struct ethtool_rxfh_fields *cmd) +{ + int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type); + struct fbnic_net *fbn = netdev_priv(netdev); + + if (hash_opt_idx < 0) + return -EINVAL; + + /* Report options from rss_en table in fbn */ + cmd->data = fbn->rss_flow_hash[hash_opt_idx]; + + return 0; +} + +#define FBNIC_L2_HASH_OPTIONS \ + (RXH_L2DA | RXH_DISCARD) +#define FBNIC_L3_HASH_OPTIONS \ + (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST) +#define FBNIC_L4_HASH_OPTIONS \ + (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3) + +static int +fbnic_set_rss_hash_opts(struct net_device *netdev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int hash_opt_idx; + + /* Verify the type requested is correct */ + hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type); + if (hash_opt_idx < 0) + return -EINVAL; + + /* Verify the fields asked for can actually be assigned based on type */ + if (cmd->data & ~FBNIC_L4_HASH_OPTIONS || + (hash_opt_idx > FBNIC_L4_HASH_OPT && + cmd->data & ~FBNIC_L3_HASH_OPTIONS) || + (hash_opt_idx > FBNIC_IP_HASH_OPT && + cmd->data & ~FBNIC_L2_HASH_OPTIONS)) + return -EINVAL; + + fbn->rss_flow_hash[hash_opt_idx] = cmd->data; + + if (netif_running(fbn->netdev)) { + fbnic_rss_reinit(fbn->fbd, fbn); + fbnic_write_rules(fbn->fbd); + } + + return 0; +} + +static int fbnic_modify_rxfh_context(struct net_device *netdev, struct ethtool_rxfh_context *ctx, const struct ethtool_rxfh_param *rxfh, @@ -1612,6 +1611,70 @@ fbnic_get_eth_mac_stats(struct net_device *netdev, &mac_stats->eth_mac.FrameTooLongErrors); } +static void +fbnic_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_stats *mac_stats; + struct fbnic_dev *fbd = fbn->fbd; + + mac_stats = &fbd->hw_stats.mac; + + fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl); + + eth_ctrl_stats->MACControlFramesReceived = + 
mac_stats->eth_ctrl.MACControlFramesReceived.value; + eth_ctrl_stats->MACControlFramesTransmitted = + mac_stats->eth_ctrl.MACControlFramesTransmitted.value; +} + +static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 9216 }, + { 9217, FBNIC_MAX_JUMBO_FRAME_SIZE }, + {} +}; + +static void +fbnic_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_stats *mac_stats; + struct fbnic_dev *fbd = fbn->fbd; + int i; + + mac_stats = &fbd->hw_stats.mac; + + fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon); + + rmon_stats->undersize_pkts = + mac_stats->rmon.undersize_pkts.value; + rmon_stats->oversize_pkts = + mac_stats->rmon.oversize_pkts.value; + rmon_stats->fragments = + mac_stats->rmon.fragments.value; + rmon_stats->jabbers = + mac_stats->rmon.jabbers.value; + + for (i = 0; fbnic_rmon_ranges[i].high; i++) { + rmon_stats->hist[i] = mac_stats->rmon.hist[i].value; + rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value; + } + + *ranges = fbnic_rmon_ranges; +} + static const struct ethtool_ops fbnic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -1633,6 +1696,8 @@ static const struct ethtool_ops fbnic_ethtool_ops = { .get_rxfh_indir_size = fbnic_get_rxfh_indir_size, .get_rxfh = fbnic_get_rxfh, .set_rxfh = fbnic_set_rxfh, + .get_rxfh_fields = fbnic_get_rss_hash_opts, + .set_rxfh_fields = fbnic_set_rss_hash_opts, .create_rxfh_context = fbnic_create_rxfh_context, .modify_rxfh_context = fbnic_modify_rxfh_context, .remove_rxfh_context = fbnic_remove_rxfh_context, @@ -1641,6 +1706,8 @@ static const struct ethtool_ops fbnic_ethtool_ops = { .get_ts_info = fbnic_get_ts_info, .get_ts_stats = fbnic_get_ts_stats, .get_eth_mac_stats = fbnic_get_eth_mac_stats, + .get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats, + .get_rmon_stats = fbnic_get_rmon_stats, }; void fbnic_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index e2368075ab8c..4521d0483d18 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, return -EBUSY; addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction); - if (dma_mapping_error(fbd->dev, addr)) { - free_page((unsigned long)msg); - + if (dma_mapping_error(fbd->dev, addr)) return -ENOSPC; - } mbx->buf_info[tail].msg = msg; mbx->buf_info[tail].addr = addr; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h index 07e54bb75bf3..4fe239717497 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h @@ -22,6 +22,23 @@ struct fbnic_hw_stat { struct fbnic_stat_counter bytes; }; +/* Note: not updated by fbnic_get_hw_stats() */ +struct fbnic_eth_ctrl_stats { + struct fbnic_stat_counter MACControlFramesTransmitted; + struct fbnic_stat_counter MACControlFramesReceived; +}; + +/* Note: not updated by fbnic_get_hw_stats() */ +struct fbnic_rmon_stats { + struct fbnic_stat_counter undersize_pkts; + struct fbnic_stat_counter oversize_pkts; + struct fbnic_stat_counter fragments; + struct fbnic_stat_counter jabbers; + + 
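+	/* One counter per bucket of the ranges table the driver reports
+	 * from fbnic_get_rmon_stats() (see fbnic_rmon_ranges[] in
+	 * fbnic_ethtool.c); both arrays are sized by the ethtool core's
+	 * ETHTOOL_RMON_HIST_MAX.
+	 */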
struct fbnic_stat_counter hist[ETHTOOL_RMON_HIST_MAX]; + struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX]; +}; + struct fbnic_eth_mac_stats { struct fbnic_stat_counter FramesTransmittedOK; struct fbnic_stat_counter FramesReceivedOK; @@ -40,6 +57,8 @@ struct fbnic_eth_mac_stats { struct fbnic_mac_stats { struct fbnic_eth_mac_stats eth_mac; + struct fbnic_eth_ctrl_stats eth_ctrl; + struct fbnic_rmon_stats rmon; }; struct fbnic_tmi_stats { diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 10e108c1fcd0..ea5ea7e329cb 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -680,6 +680,76 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset, MAC_STAT_TX_BROADCAST); } +static void +fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset, + struct fbnic_eth_ctrl_stats *ctrl_stats) +{ + fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived, + MAC_STAT_RX_CONTROL_FRAMES); + fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted, + MAC_STAT_TX_CONTROL_FRAMES); +} + +static void +fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset, + struct fbnic_rmon_stats *rmon_stats) +{ + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts, + MAC_STAT_RX_UNDERSIZE); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts, + MAC_STAT_RX_OVERSIZE); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments, + MAC_STAT_RX_FRAGMENT); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers, + MAC_STAT_RX_JABBER); + + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0], + MAC_STAT_RX_PACKET_64_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1], + MAC_STAT_RX_PACKET_65_127_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2], + MAC_STAT_RX_PACKET_128_255_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3], + MAC_STAT_RX_PACKET_256_511_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4], + MAC_STAT_RX_PACKET_512_1023_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5], + MAC_STAT_RX_PACKET_1024_1518_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6], + RPC_STAT_RX_PACKET_1519_2047_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7], + RPC_STAT_RX_PACKET_2048_4095_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8], + RPC_STAT_RX_PACKET_4096_8191_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9], + RPC_STAT_RX_PACKET_8192_9216_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10], + RPC_STAT_RX_PACKET_9217_MAX_BYTES); + + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0], + MAC_STAT_TX_PACKET_64_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1], + MAC_STAT_TX_PACKET_65_127_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2], + MAC_STAT_TX_PACKET_128_255_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3], + MAC_STAT_TX_PACKET_256_511_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4], + MAC_STAT_TX_PACKET_512_1023_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5], + MAC_STAT_TX_PACKET_1024_1518_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6], + TMI_STAT_TX_PACKET_1519_2047_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7], + TMI_STAT_TX_PACKET_2048_4095_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8], + TMI_STAT_TX_PACKET_4096_8191_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9], + 
TMI_STAT_TX_PACKET_8192_9216_BYTES); + fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10], + TMI_STAT_TX_PACKET_9217_MAX_BYTES); +} + static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val) { @@ -755,6 +825,8 @@ static const struct fbnic_mac fbnic_mac_asic = { .pcs_get_link = fbnic_pcs_get_link_asic, .pcs_get_link_event = fbnic_pcs_get_link_event_asic, .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats, + .get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats, + .get_rmon_stats = fbnic_mac_get_rmon_stats, .link_down = fbnic_mac_link_down_asic, .link_up = fbnic_mac_link_up_asic, .get_sensor = fbnic_mac_get_sensor_asic, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h index 05a591653e09..4d508e1e2151 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -85,6 +85,10 @@ struct fbnic_mac { void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset, struct fbnic_eth_mac_stats *mac_stats); + void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset, + struct fbnic_eth_ctrl_stats *ctrl_stats); + void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset, + struct fbnic_rmon_stats *rmon_stats); void (*link_down)(struct fbnic_dev *fbd); void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 64a3b953cc17..40002d9fe274 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -913,23 +913,29 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) } } +static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev, + struct ethtool_rxfh_fields *fields) +{ + fields->data = 0; + + switch (fields->flow_type) { + case TCP_V4_FLOW:case UDP_V4_FLOW: + case TCP_V6_FLOW:case UDP_V6_FLOW: + fields->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case IPV4_FLOW: case IPV6_FLOW: + fields->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; + } + + return 0; +} + static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { switch (rxnfc->cmd) { - case ETHTOOL_GRXFH: - rxnfc->data = 0; - switch (rxnfc->flow_type) { - case TCP_V4_FLOW:case UDP_V4_FLOW: - case TCP_V6_FLOW:case UDP_V6_FLOW: - rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - fallthrough; - case IPV4_FLOW: case IPV6_FLOW: - rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; - return 0; - } - break; case ETHTOOL_GRXRINGS: rxnfc->data = LAN743X_USED_RX_CHANNELS; return 0; @@ -1368,6 +1374,7 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size, .get_rxfh = lan743x_ethtool_get_rxfh, .set_rxfh = lan743x_ethtool_set_rxfh, + .get_rxfh_fields = lan743x_ethtool_get_rxfh_fields, .get_ts_info = lan743x_ethtool_get_ts_info, .get_eee = lan743x_ethtool_get_eee, .set_eee = lan743x_ethtool_set_eee, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index e8d073bfa2ca..f33dc83c5700 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -18,9 +18,9 @@ */ #define LAN743X_PTP_N_EVENT_CHAN 2 #define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN -#define LAN743X_PTP_N_EXTTS 4 -#define LAN743X_PTP_N_PPS 0 #define PCI11X1X_PTP_IO_MAX_CHANNELS 8 +#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS +#define 
LAN743X_PTP_N_PPS 0 #define PTP_CMD_CTL_TIMEOUT_CNT 50 struct lan743x_adapter; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 3504507477c6..ac2f39853bf4 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -6,6 +6,8 @@ #include <linux/pci.h> #include <linux/utsname.h> #include <linux/version.h> +#include <linux/msi.h> +#include <linux/irqdomain.h> #include <net/mana/mana.h> @@ -80,8 +82,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) return err ? err : -EPROTO; } - if (gc->num_msix_usable > resp.max_msix) - gc->num_msix_usable = resp.max_msix; + if (!pci_msix_can_alloc_dyn(pdev)) { + if (gc->num_msix_usable > resp.max_msix) + gc->num_msix_usable = resp.max_msix; + } else { + /* If dynamic allocation is enabled, we have already allocated + * the HWC MSI + */ + gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1); + } if (gc->num_msix_usable <= 1) return -ENOSPC; @@ -352,11 +361,59 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit) } EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA"); +#define MANA_SERVICE_PERIOD 10 + +struct mana_serv_work { + struct work_struct serv_work; + struct pci_dev *pdev; +}; + +static void mana_serv_func(struct work_struct *w) +{ + struct mana_serv_work *mns_wk; + struct pci_bus *bus, *parent; + struct pci_dev *pdev; + + mns_wk = container_of(w, struct mana_serv_work, serv_work); + pdev = mns_wk->pdev; + + pci_lock_rescan_remove(); + + if (!pdev) + goto out; + + bus = pdev->bus; + if (!bus) { + dev_err(&pdev->dev, "MANA service: no bus\n"); + goto out; + } + + parent = bus->parent; + if (!parent) { + dev_err(&pdev->dev, "MANA service: no parent bus\n"); + goto out; + } + + pci_stop_and_remove_bus_device(bus->self); + + msleep(MANA_SERVICE_PERIOD * 1000); + + pci_rescan_bus(parent); + +out: + pci_unlock_rescan_remove(); + + pci_dev_put(pdev); + kfree(mns_wk); + module_put(THIS_MODULE); +} +
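The service path here pairs try_module_get()/module_put() around a heap-allocated work item: mana_serv_func() above releases the references that the GDMA_EQE_HWC_FPGA_RECONFIG case below takes before scheduling it, so neither the module nor the PCI device can go away while the deferred remove/rescan is pending. A minimal sketch of that lifetime pattern in isolation — the demo_* names are illustrative, not part of the driver:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Sketch: keep module and device alive across deferred service work. */
struct demo_serv_work {
	struct work_struct work;
	struct pci_dev *pdev;		/* reference held via pci_dev_get() */
};

static void demo_serv_fn(struct work_struct *w)
{
	struct demo_serv_work *dw = container_of(w, struct demo_serv_work, work);

	/* ... long-running service action, e.g. bus remove + rescan ... */

	pci_dev_put(dw->pdev);		/* drop the device reference */
	kfree(dw);
	module_put(THIS_MODULE);	/* last step: allow module unload again */
}

static int demo_schedule_service(struct pci_dev *pdev)
{
	struct demo_serv_work *dw;

	if (!try_module_get(THIS_MODULE))	/* module is already unloading */
		return -ENODEV;

	dw = kzalloc(sizeof(*dw), GFP_ATOMIC);	/* event handlers may run in atomic context */
	if (!dw) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	dw->pdev = pci_dev_get(pdev);
	INIT_WORK(&dw->work, demo_serv_fn);
	schedule_work(&dw->work);
	return 0;
}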
static void mana_gd_process_eqe(struct gdma_queue *eq) { u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE); struct gdma_context *gc = eq->gdma_dev->gdma_context; struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr; + struct mana_serv_work *mns_wk; union gdma_eqe_info eqe_info; enum gdma_eqe_type type; struct gdma_event event; @@ -401,6 +458,33 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) eq->eq.callback(eq->eq.context, eq, &event); break; + case GDMA_EQE_HWC_FPGA_RECONFIG: + dev_info(gc->dev, "Recv MANA service type:%d\n", type); + + if (gc->in_service) { + dev_info(gc->dev, "Already in service\n"); + break; + } + + if (!try_module_get(THIS_MODULE)) { + dev_info(gc->dev, "Module is unloading\n"); + break; + } + + mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC); + if (!mns_wk) { + module_put(THIS_MODULE); + break; + } + + dev_info(gc->dev, "Start MANA service type:%d\n", type); + gc->in_service = true; + mns_wk->pdev = to_pci_dev(gc->dev); + pci_dev_get(mns_wk->pdev); + INIT_WORK(&mns_wk->serv_work, mana_serv_func); + schedule_work(&mns_wk->serv_work); + break; + default: break; } @@ -483,7 +567,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue, } queue->eq.msix_index = msi_index; - gic = &gc->irq_contexts[msi_index]; + gic = xa_load(&gc->irq_contexts, msi_index); + if (WARN_ON(!gic)) + return -EINVAL; spin_lock_irqsave(&gic->lock, flags); list_add_rcu(&queue->entry, &gic->eq_list); @@ -508,7 +594,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue) if (WARN_ON(msix_index >= gc->num_msix_usable)) return; - gic = &gc->irq_contexts[msix_index]; + gic = xa_load(&gc->irq_contexts, msix_index); + if (WARN_ON(!gic)) + return; + spin_lock_irqsave(&gic->lock, flags); list_for_each_entry_rcu(eq, &gic->eq_list, entry) { if (queue == eq) { @@ -1288,7 +1377,49 @@ void mana_gd_free_res_map(struct gdma_resource *r) r->size = 0; } -static int irq_setup(unsigned int *irqs, unsigned int len, int node) +/* + * Spread on CPUs with the following heuristics: + * + * 1. No more than one IRQ per CPU, if possible; + * 2. NUMA locality is the second priority; + * 3. Sibling dislocality is the last priority. + * + * Let's consider this topology: + * + * Node 0 1 + * Core 0 1 2 3 + * CPU 0 1 2 3 4 5 6 7 + * + * The most performant IRQ distribution based on the above topology + * and heuristics may look like this: + * + * IRQ Nodes Cores CPUs + * 0 0 0 0-1 + * 1 0 1 2-3 + * 2 0 0 0-1 + * 3 0 1 2-3 + * 4 1 2 4-5 + * 5 1 3 6-7 + * 6 1 2 4-5 + * 7 1 3 6-7 + * + * The heuristic is implemented as follows. + * + * The outer for_each() loop resets the 'weight' to the actual number + * of CPUs in the hop. Then the inner for_each() loop decrements it by + * the number of sibling groups (cores) while assigning the first set + * of IRQs to each group. IRQs 0 and 1 above are distributed this way. + * + * Now, because NUMA locality is more important, we should walk the + * same set of siblings and assign the 2nd set of IRQs (2 and 3), and + * it's implemented by the middle while() loop. We do this until the + * number of IRQs assigned on this hop equals the number of CPUs in the + * hop (weight == 0). Then we switch to the next hop and do the same + * thing. + */ + +static int irq_setup(unsigned int *irqs, unsigned int len, int node, + bool skip_first_cpu) { const struct cpumask *next, *prev = cpu_none_mask; cpumask_var_t cpus __free(free_cpumask_var); @@ -1303,11 +1434,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node) while (weight > 0) { cpumask_andnot(cpus, next, prev); for_each_cpu(cpu, cpus) { + cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); + --weight; + + if (unlikely(skip_first_cpu)) { + skip_first_cpu = false; + continue; + } + if (len-- == 0) goto done; + irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu)); - cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); - --weight; } } prev = next;
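With the topology from the comment above, irq_setup() hands each IRQ the sibling mask of one core, cycling through a node's cores until that node's CPUs are used up before moving to the next hop; skip_first_cpu makes it pass over the first sibling group, for callers whose HWC IRQ already occupies it. A condensed usage sketch modelled on the driver's own callers, as it would appear in gdma_main.c — demo_spread_irqs() and the hardcoded node are illustrative, not part of the patch:

/* Hypothetical caller: spread nvec already-allocated MSI-X vectors
 * across CPUs, starting from NUMA node 0.
 */
static int demo_spread_irqs(struct pci_dev *pdev, unsigned int nvec)
{
	unsigned int *irqs;
	unsigned int i;
	int irq, err;

	irqs = kmalloc_array(nvec, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0) {
			kfree(irqs);
			return irq;
		}
		irqs[i] = irq;
	}

	cpus_read_lock();	/* irq_setup() walks the CPU topology masks */
	err = irq_setup(irqs, nvec, 0 /* node */, false /* skip_first_cpu */);
	cpus_read_unlock();

	kfree(irqs);
	return err;
}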
@@ -1317,47 +1455,108 @@ done: return 0; } -static int mana_gd_setup_irqs(struct pci_dev *pdev) +static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) { struct gdma_context *gc = pci_get_drvdata(pdev); - unsigned int max_queues_per_port; struct gdma_irq_context *gic; - unsigned int max_irqs, cpu; - int start_irq_index = 1; - int nvec, *irqs, irq; - int err, i = 0, j; + bool skip_first_cpu = false; + int *irqs, irq, err, i; - cpus_read_lock(); - max_queues_per_port = num_online_cpus(); - if (max_queues_per_port > MANA_MAX_NUM_QUEUES) - max_queues_per_port = MANA_MAX_NUM_QUEUES; + irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL); + if (!irqs) + return -ENOMEM; - /* Need 1 interrupt for the Hardware communication Channel (HWC) */ - max_irqs = max_queues_per_port + 1; + /* + * While processing the next PCI IRQ vector, we start at index 1, + * as the IRQ vector at index 0 has already been processed for the + * HWC. However, the irqs array is populated starting at index 0, + * as it is later passed to irq_setup(). + */ + for (i = 1; i <= nvec; i++) { + gic = kzalloc(sizeof(*gic), GFP_KERNEL); + if (!gic) { + err = -ENOMEM; + goto free_irq; + } + gic->handler = mana_gd_process_eq_events; + INIT_LIST_HEAD(&gic->eq_list); + spin_lock_init(&gic->lock); - nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); - if (nvec < 0) { - cpus_read_unlock(); - return nvec; + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", + i - 1, pci_name(pdev)); + + /* one PCI vector is already allocated for the HWC */ + irqs[i - 1] = pci_irq_vector(pdev, i); + if (irqs[i - 1] < 0) { + err = irqs[i - 1]; + goto free_current_gic; + } + + err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic); + if (err) + goto free_current_gic; + + xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } - if (nvec <= num_online_cpus()) - start_irq_index = 0; - irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL); - if (!irqs) { - err = -ENOMEM; - goto free_irq_vector; + /* + * When calling irq_setup() for dynamically added IRQs, if the number + * of CPUs is greater than or equal to the number of allocated MSI-X + * vectors, we need to skip the first CPU sibling group, since it is + * already affinitized to the HWC IRQ. + */ + cpus_read_lock(); + if (gc->num_msix_usable <= num_online_cpus()) + skip_first_cpu = true; + + err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu); + if (err) { + cpus_read_unlock(); + goto free_irq; } - gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context), - GFP_KERNEL); - if (!gc->irq_contexts) { - err = -ENOMEM; - goto free_irq_array; + cpus_read_unlock(); + kfree(irqs); + return 0; + +free_current_gic: + kfree(gic); +free_irq: + for (i -= 1; i > 0; i--) { + irq = pci_irq_vector(pdev, i); + gic = xa_load(&gc->irq_contexts, i); + if (WARN_ON(!gic)) + continue; + + irq_update_affinity_hint(irq, NULL); + free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } + kfree(irqs); + return err; +} + +static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + int *irqs, *start_irqs, irq; + unsigned int cpu; + int err, i; + + irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + start_irqs = irqs; for (i = 0; i < nvec; i++) { - gic = &gc->irq_contexts[i]; + gic = kzalloc(sizeof(*gic), GFP_KERNEL); + if (!gic) { + err = -ENOMEM; + goto free_irq; + } + gic->handler = mana_gd_process_eq_events; INIT_LIST_HEAD(&gic->eq_list); spin_lock_init(&gic->lock); @@ -1369,69 +1568,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", i - 1, pci_name(pdev)); - irq = pci_irq_vector(pdev, i); - if (irq < 0) { - err = irq; - goto free_irq; + irqs[i] = pci_irq_vector(pdev, i); + if (irqs[i] < 0) { + err = irqs[i]; + goto free_current_gic; } - if (!i) { - err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_irq; - - /* If number of IRQ is one extra than number of online CPUs, - * then we need to assign IRQ0 (hwc irq) and IRQ1 to - * same CPU. - * Else we will use different CPUs for IRQ0 and IRQ1. - * Also we are using cpumask_local_spread instead of - * cpumask_first for the node, because the node can be - * mem only.
- */ - if (start_irq_index) { - cpu = cpumask_local_spread(i, gc->numa_node); - irq_set_affinity_and_hint(irq, cpumask_of(cpu)); - } else { - irqs[start_irq_index] = irq; - } - } else { - irqs[i - start_irq_index] = irq; - err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0, - gic->name, gic); - if (err) - goto free_irq; - } + err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic); + if (err) + goto free_current_gic; + + xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } - err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node); - if (err) + /* If the number of IRQs is one more than the number of online CPUs, + * then we need to assign IRQ0 (the HWC IRQ) and IRQ1 to the + * same CPU. + * Otherwise we will use different CPUs for IRQ0 and IRQ1. + * Also, we use cpumask_local_spread instead of cpumask_first + * for the node, because the node can be memory-only. + */ + cpus_read_lock(); + if (nvec > num_online_cpus()) { + cpu = cpumask_local_spread(0, gc->numa_node); + irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu)); + irqs++; + nvec -= 1; + } + + err = irq_setup(irqs, nvec, gc->numa_node, false); + if (err) { + cpus_read_unlock(); goto free_irq; + } - gc->max_num_msix = nvec; - gc->num_msix_usable = nvec; cpus_read_unlock(); - kfree(irqs); + kfree(start_irqs); return 0; +free_current_gic: + kfree(gic); free_irq: - for (j = i - 1; j >= 0; j--) { - irq = pci_irq_vector(pdev, j); - gic = &gc->irq_contexts[j]; + for (i -= 1; i >= 0; i--) { + irq = pci_irq_vector(pdev, i); + gic = xa_load(&gc->irq_contexts, i); + if (WARN_ON(!gic)) + continue; irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } - kfree(gc->irq_contexts); - gc->irq_contexts = NULL; -free_irq_array: - kfree(irqs); -free_irq_vector: - cpus_read_unlock(); - pci_free_irq_vectors(pdev); return err; } +static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + unsigned int max_irqs, min_irqs; + int nvec, err; + + if (pci_msix_can_alloc_dyn(pdev)) { + max_irqs = 1; + min_irqs = 1; + } else { + /* Need 1 interrupt for the HWC */ + max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1; + min_irqs = 2; + } + + nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX); + if (nvec < 0) + return nvec; + + err = mana_gd_setup_irqs(pdev, nvec); + if (err) { + pci_free_irq_vectors(pdev); + return err; + } + + gc->num_msix_usable = nvec; + gc->max_num_msix = nvec; + + return 0; +} + +static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct msi_map irq_map; + int max_irqs, i, err; + + if (!pci_msix_can_alloc_dyn(pdev)) + /* remaining IRQs were already allocated along with the HWC IRQ */ + return 0; + + /* allocate only the remaining IRQs */ + max_irqs = gc->num_msix_usable - 1; + + for (i = 1; i <= max_irqs; i++) { + irq_map = pci_msix_alloc_irq_at(pdev, i, NULL); + if (!irq_map.virq) { + err = irq_map.index; + /* the caller will handle cleaning up all allocated + * IRQs after the HWC is destroyed + */ + return err; + } + } + + err = mana_gd_setup_dyn_irqs(pdev, max_irqs); + if (err) + return err; + + gc->max_num_msix = gc->max_num_msix + max_irqs; + + return 0; +} + static void mana_gd_remove_irqs(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); @@ -1446,19 +1704,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev) if (irq < 0) continue; - gic = &gc->irq_contexts[i]; + gic = xa_load(&gc->irq_contexts, i); + if (WARN_ON(!gic)) +
continue; /* Need to clear the hint before free_irq */ irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } pci_free_irq_vectors(pdev); gc->max_num_msix = 0; gc->num_msix_usable = 0; - kfree(gc->irq_contexts); - gc->irq_contexts = NULL; } static int mana_gd_setup(struct pci_dev *pdev) @@ -1473,9 +1733,10 @@ static int mana_gd_setup(struct pci_dev *pdev) if (!gc->service_wq) return -ENOMEM; - err = mana_gd_setup_irqs(pdev); + err = mana_gd_setup_hwc_irqs(pdev); if (err) { - dev_err(gc->dev, "Failed to setup IRQs: %d\n", err); + dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n", + err); goto free_workqueue; } @@ -1491,6 +1752,12 @@ static int mana_gd_setup(struct pci_dev *pdev) if (err) goto destroy_hwc; + err = mana_gd_setup_remaining_irqs(pdev); + if (err) { + dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err); + goto destroy_hwc; + } + err = mana_gd_detect_devices(pdev); if (err) goto destroy_hwc; @@ -1571,6 +1838,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) gc->is_pf = mana_is_pf(pdev->device); gc->bar0_va = bar0_va; gc->dev = &pdev->dev; + xa_init(&gc->irq_contexts); if (gc->is_pf) gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root); @@ -1605,6 +1873,7 @@ unmap_bar: */ debugfs_remove_recursive(gc->mana_pci_debugfs); gc->mana_pci_debugfs = NULL; + xa_destroy(&gc->irq_contexts); pci_iounmap(pdev, bar0_va); free_gc: pci_set_drvdata(pdev, NULL); @@ -1630,6 +1899,8 @@ static void mana_gd_remove(struct pci_dev *pdev) gc->mana_pci_debugfs = NULL; + xa_destroy(&gc->irq_contexts); + pci_iounmap(pdev, gc->bar0_va); vfree(gc); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index a8c4d8db75a5..650d22654d49 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021, Microsoft Corporation. */ #include <net/mana/gdma.h> +#include <net/mana/mana.h> #include <net/mana/hw_channel.h> #include <linux/vmalloc.h> @@ -890,8 +891,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, } if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) { - dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", - ctx->status_code); + if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) { + err = -EOPNOTSUPP; + goto out; + } + if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT) + dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", + ctx->status_code); err = -EPROTO; goto out; } diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index ccd2885c939e..016fd808ccad 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -251,10 +251,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct netdev_queue *net_txq; struct mana_stats_tx *tx_stats; struct gdma_queue *gdma_sq; + int err, len, num_gso_seg; unsigned int csum_type; struct mana_txq *txq; struct mana_cq *cq; - int err, len; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -407,6 +407,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_queue_tail(&txq->pending_skbs, skb); len = skb->len; + num_gso_seg = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; net_txq = netdev_get_tx_queue(ndev, txq_idx); err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, @@ -431,10 +432,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* skb may be freed after mana_gd_post_work_request. Do not use it. */ skb = NULL; + /* Populate the packet and byte counters based on post-GSO + * calculations: a GSO skb accounts for num_gso_seg wire packets, + * each additional segment carrying another gso_hs bytes of headers. + */ tx_stats = &txq->stats; u64_stats_update_begin(&tx_stats->syncp); - tx_stats->packets++; - tx_stats->bytes += len; + tx_stats->packets += num_gso_seg; + tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs); u64_stats_update_end(&tx_stats->syncp); tx_busy: @@ -719,6 +723,78 @@ out: return err; } +static int mana_shaper_set(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(binding->netdev); + u32 old_speed, rate; + int err; + + if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) { + NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev"); + return -EINVAL; + } + + if (apc->handle.id && shaper->handle.id != apc->handle.id) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers"); + return -EOPNOTSUPP; + } + + if (!shaper->bw_max || (shaper->bw_max % 100000000)) { + NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth"); + return -EINVAL; + } + + rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */ + rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */ + + /* Get current speed */ + err = mana_query_link_cfg(apc); + old_speed = (err) ? SPEED_UNKNOWN : apc->speed; + + if (!err) { + err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE); + apc->speed = (err) ? old_speed : rate; + apc->handle = (err) ? apc->handle : shaper->handle; + } + + return err; +} + +static int mana_shaper_del(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(binding->netdev); + int err; + + err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE); + + if (!err) { + /* Reset mana port context parameters */ + apc->handle.id = 0; + apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; + apc->speed = 0; + } + + return err; +} + +static void mana_shaper_cap(struct net_shaper_binding *binding, + enum net_shaper_scope scope, + unsigned long *flags) +{ + *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) | + BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS); +} + +static const struct net_shaper_ops mana_shaper_ops = { + .set = mana_shaper_set, + .delete = mana_shaper_del, + .capabilities = mana_shaper_cap, +}; + static const struct net_device_ops mana_devops = { .ndo_open = mana_open, .ndo_stop = mana_close, @@ -729,6 +805,7 @@ static const struct net_device_ops mana_devops = { .ndo_bpf = mana_bpf, .ndo_xdp_xmit = mana_xdp_xmit, .ndo_change_mtu = mana_change_mtu, + .net_shaper_ops = &mana_shaper_ops, }; static void mana_cleanup_port_context(struct mana_port_context *apc) @@ -774,8 +851,12 @@ static int mana_send_request(struct mana_context *ac, void *in_buf, err = mana_gd_send_request(gc, in_len, in_buf, out_len, out_buf); if (err || resp->status) { - dev_err(dev, "Failed to send mana message: %d, 0x%x\n", - err, resp->status); + if (err == -EOPNOTSUPP) + return err; + + if (req->req.msg_type != MANA_QUERY_PHY_STAT) + dev_err(dev, "Failed to send mana message: %d, 0x%x\n", + err, resp->status); return err ?
err : -EPROTO; } @@ -1161,6 +1242,95 @@ out: return err; } +int mana_query_link_cfg(struct mana_port_context *apc) +{ + struct net_device *ndev = apc->ndev; + struct mana_query_link_config_resp resp = {}; + struct mana_query_link_config_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(req), sizeof(resp)); + + req.vport = apc->port_handle; + req.hdr.resp.msg_version = GDMA_MESSAGE_V2; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n"); + return err; + } + netdev_err(ndev, "Failed to query link config: %d\n", err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EOPNOTSUPP; + return err; + } + + if (resp.qos_unconfigured) { + err = -EINVAL; + return err; + } + apc->speed = resp.link_speed_mbps; + apc->max_speed = resp.qos_speed_mbps; + return 0; +} + +int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, + int enable_clamping) +{ + struct mana_set_bw_clamp_resp resp = {}; + struct mana_set_bw_clamp_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP, + sizeof(req), sizeof(resp)); + req.vport = apc->port_handle; + req.link_speed_mbps = speed; + req.enable_clamping = enable_clamping; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n"); + return err; + } + netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d", + speed, err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EOPNOTSUPP; + return err; + } + + if (resp.qos_unconfigured) + netdev_info(ndev, "QoS is unconfigured\n"); + + return 0; +} + int mana_create_wq_obj(struct mana_port_context *apc, mana_handle_t vport, u32 wq_type, struct mana_obj_spec *wq_spec, @@ -1911,8 +2081,10 @@ static void mana_destroy_txq(struct mana_port_context *apc) napi = &apc->tx_qp[i].tx_cq.napi; if (apc->tx_qp[i].txq.napi_initialized) { napi_synchronize(napi); - napi_disable(napi); - netif_napi_del(napi); + netdev_lock_ops_to_full(napi->dev); + napi_disable_locked(napi); + netif_napi_del_locked(napi); + netdev_unlock_full_to_ops(napi->dev); apc->tx_qp[i].txq.napi_initialized = false; } mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); @@ -2064,8 +2236,11 @@ static int mana_create_txq(struct mana_port_context *apc, mana_create_txq_debugfs(apc, i); - netif_napi_add_tx(net, &cq->napi, mana_poll); - napi_enable(&cq->napi); + set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state); + netdev_lock_ops_to_full(net); + netif_napi_add_locked(net, &cq->napi, mana_poll); + napi_enable_locked(&cq->napi); + netdev_unlock_full_to_ops(net); txq->napi_initialized = true; mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); @@ -2101,9 +2276,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc, if (napi_initialized) { napi_synchronize(napi); - napi_disable(napi); - - netif_napi_del(napi); + netdev_lock_ops_to_full(napi->dev); + napi_disable_locked(napi); + netif_napi_del_locked(napi); + 
netdev_unlock_full_to_ops(napi->dev); } xdp_rxq_info_unreg(&rxq->xdp_rxq); @@ -2354,14 +2530,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); + netdev_lock_ops_to_full(ndev); + netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1); + netdev_unlock_full_to_ops(ndev); WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, cq->napi.napi_id)); WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, rxq->page_pool)); - napi_enable(&cq->napi); + netdev_lock_ops_to_full(ndev); + napi_enable_locked(&cq->napi); + netdev_unlock_full_to_ops(ndev); mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); out: @@ -2611,6 +2791,88 @@ void mana_query_gf_stats(struct mana_port_context *apc) apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; } +void mana_query_phy_stats(struct mana_port_context *apc) +{ + struct mana_query_phy_stat_resp resp = {}; + struct mana_query_phy_stat_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT, + sizeof(req), sizeof(resp)); + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) + return; + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(ndev, + "Failed to query PHY stats: %d, resp:0x%x\n", + err, resp.hdr.status); + return; + } + + /* Aggregate drop counters */ + apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy; + apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy; + + /* Per TC traffic Counters */ + apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy; + apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy; + apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy; + apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy; + apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy; + apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy; + apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy; + apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy; + apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy; + apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy; + apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy; + apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy; + apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy; + apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy; + apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy; + apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy; + + /* Per TC byte Counters */ + apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy; + apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy; + apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy; + apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy; + apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy; + apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy; + apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy; + apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy; + apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy; + apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy; + apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy; + apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy; + apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy; + apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy; + apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy; + apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy; + + /* Per TC pause Counters */ + apc->phy_stats.rx_pause_tc0_phy = 
resp.rx_pause_tc0_phy; + apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy; + apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy; + apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy; + apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy; + apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy; + apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy; + apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy; + apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy; + apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy; + apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy; + apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy; + apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy; + apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy; + apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy; + apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy; +} + static int mana_init_port(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); @@ -2918,6 +3180,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, goto free_indir; } + debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); + return 0; free_indir: diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index c419626073f5..a1afa75a9463 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -7,10 +7,12 @@ #include <net/mana/mana.h> -static const struct { +struct mana_stats_desc { char name[ETH_GSTRING_LEN]; u16 offset; -} mana_eth_stats[] = { +}; + +static const struct mana_stats_desc mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats, @@ -75,6 +77,59 @@ static const struct { rx_cqe_unknown_type)}, }; +static const struct mana_stats_desc mana_phy_stats[] = { + { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) }, + { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) }, + { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) }, + { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) }, + { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) }, + { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) }, + { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) }, + { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) }, + { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) }, + { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) }, + { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) }, + { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) }, + { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) }, + { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) }, + { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) }, + { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) }, + { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) }, + { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, 
tx_byte_tc3_phy) }, + { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) }, + { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) }, + { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) }, + { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) }, + { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) }, + { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) }, + { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) }, + { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) }, + { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) }, + { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) }, + { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) }, + { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) }, + { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) }, + { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) }, + { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) }, + { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) }, + { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) }, + { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) }, + { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) }, + { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) }, + { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) }, + { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) }, + { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) }, + { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) }, + { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) }, + { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) }, + { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) }, + { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) }, + { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) }, + { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) }, + { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) }, + { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) }, +}; + static int mana_get_sset_count(struct net_device *ndev, int stringset) { struct mana_port_context *apc = netdev_priv(ndev); @@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset) if (stringset != ETH_SS_STATS) return -EINVAL; - return ARRAY_SIZE(mana_eth_stats) + num_queues * - (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT); + return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + + num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT); } static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data) @@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data) for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) ethtool_puts(&data, mana_eth_stats[i].name); + 
for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++) + ethtool_puts(&data, mana_phy_stats[i].name); + for (i = 0; i < num_queues; i++) { ethtool_sprintf(&data, "rx_%d_packets", i); ethtool_sprintf(&data, "rx_%d_bytes", i); @@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev, struct mana_port_context *apc = netdev_priv(ndev); unsigned int num_queues = apc->num_queues; void *eth_stats = &apc->eth_stats; + void *phy_stats = &apc->phy_stats; struct mana_stats_rx *rx_stats; struct mana_stats_tx *tx_stats; unsigned int start; @@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev, /* we call mana function to update stats from GDMA */ mana_query_gf_stats(apc); + /* Call this MANA function to get the PHY stats from GDMA; these + * include the aggregate tx/rx drop counters and the per-TC (Traffic + * Class) tx/rx packet, byte, and pause counters. + */ + mana_query_phy_stats(apc); + for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); + for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++) + data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset); + for (q = 0; q < num_queues; q++) { rx_stats = &apc->rxqs[q]->stats; @@ -427,6 +495,12 @@ out: static int mana_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_query_link_cfg(apc); + cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed; + cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_OTHER; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 932f59d70f41..132626a3f9f7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2394,8 +2394,7 @@ static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table) static const struct udp_tunnel_nic_info nfp_udp_tunnels = { .sync_table = nfp_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | - UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { { .n_entries = NFP_NET_N_VXLAN_PORTS, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 4c377bdc62c8..136bfa3516d0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -409,6 +409,7 @@ static void ionic_remove(struct pci_dev *pdev) timer_shutdown_sync(&ionic->watchdog_timer); if (ionic->lif) { + cancel_work_sync(&ionic->lif->deferred.work); /* prevent adminq cmds if already known as down */ if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 18b9c8a810ae..093c5358b6e8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -424,9 +424,9 @@ do_check_time: if (fw_hb_ready != idev->fw_hb_ready) { idev->fw_hb_ready = fw_hb_ready; if (!fw_hb_ready) - dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb); + dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb); else - dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb); + dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb); } if (!fw_hb_ready) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7707a9e53c43..48cb5d30b5f6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3526,10 +3526,6 @@ void ionic_lif_free(struct ionic_lif *lif) lif->info = NULL; lif->info_pa = 0; - /* unmap doorbell page */ - ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); - lif->kern_dbpage = NULL; - mutex_destroy(&lif->config_lock); mutex_destroy(&lif->queue_lock); @@ -3555,6 +3551,9 @@ void ionic_lif_deinit(struct ionic_lif *lif) ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); + ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; + ionic_lif_reset(lif); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index daf1e82cb76b..0e60a6bef99a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -516,9 +516,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds, unsigned long start_time; unsigned long max_wait; unsigned long duration; - int done = 0; bool fw_up; int opcode; + bool done; int err; /* Wait for dev cmd to complete, retrying if we get EAGAIN, @@ -526,6 +526,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds, */ max_wait = jiffies + (max_seconds * HZ); try_again: + done = false; opcode = idev->opcode; start_time = jiffies; for (fw_up = ionic_is_fw_running(idev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 985026dd816f..7e341e026489 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -987,20 +987,17 @@ static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table) static const struct udp_tunnel_nic_info qede_udp_tunnels_both = { .sync_table = qede_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, }, }, qede_udp_tunnels_vxlan = { .sync_table = qede_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, }, qede_udp_tunnels_geneve = { .sync_table = qede_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, }, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index eb69121df726..53cdd36c4123 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -486,7 +486,6 @@ static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table) static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = { .sync_table = qlcnic_udp_tunnel_sync, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 43170500d566..9c601f271c02 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -216,8 +216,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_VDEVICE(REALTEK, 0x8168) }, { PCI_VDEVICE(NCUBE, 0x8168) }, { PCI_VDEVICE(REALTEK, 0x8169) }, - { 
PCI_VENDOR_ID_DLINK, 0x4300, - PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, { PCI_VDEVICE(DLINK, 0x4300) }, { PCI_VDEVICE(DLINK, 0x4302) }, { PCI_VDEVICE(AT, 0xc107) }, @@ -5262,7 +5260,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_61) phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_2500baseT_Full_BIT); - phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT); /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 498cfe4d0cac..20decdeb9fdb 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -288,6 +288,7 @@ struct rtase_ring { u32 cur_idx; u32 dirty_idx; u16 index; + u8 type; struct sk_buff *skbuff[RTASE_NUM_DESC]; void *data_buf[RTASE_NUM_DESC]; diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 4d37217e9a14..ef13109c49cf 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -326,6 +326,7 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx) ring->cur_idx = 0; ring->dirty_idx = 0; ring->index = idx; + ring->type = NETDEV_QUEUE_TYPE_TX; ring->alloc_fail = 0; for (i = 0; i < RTASE_NUM_DESC; i++) { @@ -345,6 +346,9 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx) ring->ivec = &tp->int_vector[0]; list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list); } + + netif_queue_set_napi(tp->dev, ring->index, + ring->type, &ring->ivec->napi); } static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping, @@ -590,6 +594,7 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx) ring->cur_idx = 0; ring->dirty_idx = 0; ring->index = idx; + ring->type = NETDEV_QUEUE_TYPE_RX; ring->alloc_fail = 0; for (i = 0; i < RTASE_NUM_DESC; i++) @@ -597,6 +602,8 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx) ring->ring_handler = rx_handler; ring->ivec = &tp->int_vector[idx]; + netif_queue_set_napi(tp->dev, ring->index, + ring->type, &ring->ivec->napi); list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list); } @@ -1161,8 +1168,12 @@ static void rtase_down(struct net_device *dev) ivec = &tp->int_vector[i]; napi_disable(&ivec->napi); list_for_each_entry_safe(ring, tmp, &ivec->ring_list, - ring_entry) + ring_entry) { + netif_queue_set_napi(tp->dev, ring->index, + ring->type, NULL); + list_del(&ring->ring_entry); + } } netif_tx_disable(dev); @@ -1518,8 +1529,12 @@ static void rtase_sw_reset(struct net_device *dev) for (i = 0; i < tp->int_nums; i++) { ivec = &tp->int_vector[i]; list_for_each_entry_safe(ring, tmp, &ivec->ring_list, - ring_entry) + ring_entry) { + netif_queue_set_napi(tp->dev, ring->index, + ring->type, NULL); + list_del(&ring->ring_entry); + } } ret = rtase_init_ring(dev); @@ -1871,6 +1886,18 @@ static void rtase_init_netdev_ops(struct net_device *dev) dev->ethtool_ops = &rtase_ethtool_ops; } +static void rtase_init_napi(struct rtase_private *tp) +{ + u16 i; + + for (i = 0; i < tp->int_nums; i++) { + netif_napi_add_config(tp->dev, &tp->int_vector[i].napi, + tp->int_vector[i].poll, i); + netif_napi_set_irq(&tp->int_vector[i].napi, + tp->int_vector[i].irq); + } +} + static void rtase_reset_interrupt(struct pci_dev *pdev, const struct rtase_private *tp) { @@ -1956,9 +1983,6 @@ static void rtase_init_int_vector(struct rtase_private *tp) 
memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name)); INIT_LIST_HEAD(&tp->int_vector[0].ring_list); - netif_napi_add(tp->dev, &tp->int_vector[0].napi, - tp->int_vector[0].poll); - /* interrupt vector 1 ~ 3 */ for (i = 1; i < tp->int_nums; i++) { tp->int_vector[i].tp = tp; @@ -1972,9 +1996,6 @@ static void rtase_init_int_vector(struct rtase_private *tp) memset(tp->int_vector[i].name, 0x0, sizeof(tp->int_vector[0].name)); INIT_LIST_HEAD(&tp->int_vector[i].ring_list); - - netif_napi_add(tp->dev, &tp->int_vector[i].napi, - tp->int_vector[i].poll); } } @@ -2206,6 +2227,8 @@ static int rtase_init_one(struct pci_dev *pdev, goto err_out_del_napi; } + rtase_init_napi(tp); + rtase_init_netdev_ops(dev); dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 4a439b34114d..ad73733644f9 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -308,8 +308,8 @@ static int sxgbe_set_coalesce(struct net_device *dev, return 0; } -static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, - struct ethtool_rxnfc *cmd) +static int sxgbe_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *cmd) { cmd->data = 0; @@ -344,26 +344,11 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, return 0; } -static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *rule_locs) +static int sxgbe_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *cmd, + struct netlink_ext_ack *extack) { struct sxgbe_priv_data *priv = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXFH: - ret = sxgbe_get_rss_hash_opts(priv, cmd); - break; - default: - break; - } - - return ret; -} - -static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv, - struct ethtool_rxnfc *cmd) -{ u32 reg_val = 0; /* RSS does not support anything other than hashing @@ -421,22 +406,6 @@ static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv, return 0; } -static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct sxgbe_priv_data *priv = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = sxgbe_set_rss_hash_opt(priv, cmd); - break; - default: - break; - } - - return ret; -} - static void sxgbe_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *space) { @@ -489,8 +458,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = { .get_channels = sxgbe_get_channels, .get_coalesce = sxgbe_get_coalesce, .set_coalesce = sxgbe_set_coalesce, - .get_rxnfc = sxgbe_get_rxnfc, - .set_rxnfc = sxgbe_set_rxnfc, + .get_rxfh_fields = sxgbe_get_rxfh_fields, + .set_rxfh_fields = sxgbe_set_rxfh_fields, .get_regs = sxgbe_get_regs, .get_regs_len = sxgbe_get_regs_len, .get_eee = sxgbe_get_eee, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 47349c148c0c..fcec81f862ec 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3985,7 +3985,6 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev, static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = { .set_port = efx_ef10_udp_tnl_set_port, .unset_port = efx_ef10_udp_tnl_unset_port, - .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 16, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 
83d715544f7f..afbedca63b29 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -262,6 +262,7 @@ const struct ethtool_ops efx_ethtool_ops = { .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_per_ctx_fields = true, .rxfh_per_ctx_key = true, .cap_rss_rxnfc_adds = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ea5da5793362..cbffccb3b9af 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -396,17 +396,6 @@ enum request_irq_err { #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) -/* Physical Coding Sublayer */ -struct rgmii_adv { - unsigned int pause; - unsigned int duplex; - unsigned int lp_pause; - unsigned int lp_duplex; -}; - -#define STMMAC_PCS_PAUSE 1 -#define STMMAC_PCS_ASYM_PAUSE 2 - /* DMA HW capabilities */ struct dma_features { unsigned int mbps_10_100; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c index 3e86810717d3..32b5d1492e2e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c @@ -44,24 +44,50 @@ struct ls1x_dwmac { struct plat_stmmacenet_data *plat_dat; struct regmap *regmap; + unsigned int id; }; -static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) +struct ls1x_data { + int (*setup)(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat); + int (*init)(struct platform_device *pdev, void *bsp_priv); +}; + +static int ls1b_dwmac_setup(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) { - struct ls1x_dwmac *dwmac = priv; - struct plat_stmmacenet_data *plat = dwmac->plat_dat; - struct regmap *regmap = dwmac->regmap; + struct ls1x_dwmac *dwmac = plat_dat->bsp_priv; struct resource *res; - unsigned long reg_base; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { + /* This shouldn't fail - stmmac_get_platform_resources() + * already mapped this resource. 
+ */ dev_err(&pdev->dev, "Could not get IO_MEM resources\n"); return -EINVAL; } - reg_base = (unsigned long)res->start; - if (reg_base == LS1B_GMAC0_BASE) { + if (res->start == LS1B_GMAC0_BASE) { + dwmac->id = 0; + } else if (res->start == LS1B_GMAC1_BASE) { + dwmac->id = 1; + } else { + dev_err(&pdev->dev, "Invalid Ethernet MAC base address %pR", + res); + return -EINVAL; + } + + return 0; +} + +static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) +{ + struct ls1x_dwmac *dwmac = priv; + struct plat_stmmacenet_data *plat = dwmac->plat_dat; + struct regmap *regmap = dwmac->regmap; + + if (dwmac->id == 0) { switch (plat->phy_interface) { case PHY_INTERFACE_MODE_RGMII_ID: regmap_update_bits(regmap, LS1X_SYSCON0, @@ -80,7 +106,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) } regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); - } else if (reg_base == LS1B_GMAC1_BASE) { + } else if (dwmac->id == 1) { regmap_update_bits(regmap, LS1X_SYSCON0, GMAC1_USE_UART1 | GMAC1_USE_UART0, GMAC1_USE_UART1 | GMAC1_USE_UART0); @@ -104,10 +130,6 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) } regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0); - } else { - dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx", - reg_base); - return -EINVAL; } return 0; @@ -143,9 +165,9 @@ static int ls1x_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; + const struct ls1x_data *data; struct regmap *regmap; struct ls1x_dwmac *dwmac; - int (*init)(struct platform_device *pdev, void *priv); int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -159,8 +181,8 @@ static int ls1x_dwmac_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(regmap), "Unable to find syscon\n"); - init = of_device_get_match_data(&pdev->dev); - if (!init) { + data = of_device_get_match_data(&pdev->dev); + if (!data) { dev_err(&pdev->dev, "No of match data provided\n"); return -EINVAL; } @@ -175,21 +197,36 @@ static int ls1x_dwmac_probe(struct platform_device *pdev) "dt configuration failed\n"); plat_dat->bsp_priv = dwmac; - plat_dat->init = init; + plat_dat->init = data->init; dwmac->plat_dat = plat_dat; dwmac->regmap = regmap; + if (data->setup) { + ret = data->setup(pdev, plat_dat); + if (ret) + return ret; + } + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } +static const struct ls1x_data ls1b_dwmac_data = { + .setup = ls1b_dwmac_setup, + .init = ls1b_dwmac_syscon_init, +}; + +static const struct ls1x_data ls1c_dwmac_data = { + .init = ls1c_dwmac_syscon_init, +}; + static const struct of_device_id ls1x_dwmac_match[] = { { .compatible = "loongson,ls1b-gmac", - .data = &ls1b_dwmac_syscon_init, + .data = &ls1b_dwmac_data, }, { .compatible = "loongson,ls1c-emac", - .data = &ls1c_dwmac_syscon_init, + .data = &ls1c_dwmac_data, }, { } }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e30bdf72331a..d8fd4d8f6ced 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -622,6 +622,11 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed) } } +static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable) +{ + stmmac_pcs_ctrl_ane(priv, enable, 0, 0); +} + /* On interface toggle MAC registers gets reset. 
* Configure MAC block for SGMII on ethernet phy link up */ @@ -640,7 +645,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); ethqos_set_serdes_speed(ethqos, SPEED_2500); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0); + ethqos_pcs_set_inband(priv, false); break; case SPEED_1000: val &= ~ETHQOS_MAC_CTRL_PORT_SEL; @@ -648,12 +653,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); ethqos_set_serdes_speed(ethqos, SPEED_1000); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); + ethqos_pcs_set_inband(priv, true); break; case SPEED_100: val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE; ethqos_set_serdes_speed(ethqos, SPEED_1000); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); + ethqos_pcs_set_inband(priv, true); break; case SPEED_10: val |= ETHQOS_MAC_CTRL_PORT_SEL; @@ -663,7 +668,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) SGMII_10M_RX_CLK_DVDR), RGMII_IO_MACRO_CONFIG); ethqos_set_serdes_speed(ethqos, SPEED_1000); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); + ethqos_pcs_set_inband(priv, true); break; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 700858ff6f7c..79b92130a03f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -24,12 +24,21 @@ #include "stmmac_platform.h" struct rk_priv_data; + +struct rk_reg_speed_data { + unsigned int rgmii_10; + unsigned int rgmii_100; + unsigned int rgmii_1000; + unsigned int rmii_10; + unsigned int rmii_100; +}; + struct rk_gmac_ops { void (*set_to_rgmii)(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay); void (*set_to_rmii)(struct rk_priv_data *bsp_priv); - void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed); - void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed); + int (*set_speed)(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed); void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input, bool enable); void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv); @@ -58,7 +67,7 @@ enum rk_clocks_index { }; struct rk_priv_data { - struct platform_device *pdev; + struct device *dev; phy_interface_t phy_iface; int id; struct regulator *regulator; @@ -71,7 +80,6 @@ struct rk_priv_data { struct clk_bulk_data *clks; int num_clks; - struct clk *clk_mac; struct clk *clk_phy; struct reset_control *phy_reset; @@ -83,6 +91,64 @@ struct rk_priv_data { struct regmap *php_grf; }; +static int rk_set_reg_speed(struct rk_priv_data *bsp_priv, + const struct rk_reg_speed_data *rsd, + unsigned int reg, phy_interface_t interface, + int speed) +{ + unsigned int val; + + if (phy_interface_mode_is_rgmii(interface)) { + if (speed == SPEED_10) { + val = rsd->rgmii_10; + } else if (speed == SPEED_100) { + val = rsd->rgmii_100; + } else if (speed == SPEED_1000) { + val = rsd->rgmii_1000; + } else { + /* Phylink will not allow inappropriate speeds for + * interface modes, so this should never happen. + */ + return -EINVAL; + } + } else if (interface == PHY_INTERFACE_MODE_RMII) { + if (speed == SPEED_10) { + val = rsd->rmii_10; + } else if (speed == SPEED_100) { + val = rsd->rmii_100; + } else { + /* Phylink will not allow inappropriate speeds for + * interface modes, so this should never happen. 
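rk_set_reg_speed() collapses the many near-identical per-SoC speed callbacks into one lookup over rk_reg_speed_data; each SoC now contributes only its register values. An equivalent switch-based formulation of the same lookup (a sketch, assuming the struct layout above):

    static int rk_lookup_speed_val(const struct rk_reg_speed_data *rsd,
                                   phy_interface_t interface, int speed,
                                   unsigned int *val)
    {
        if (phy_interface_mode_is_rgmii(interface)) {
            switch (speed) {
            case SPEED_10:   *val = rsd->rgmii_10;   return 0;
            case SPEED_100:  *val = rsd->rgmii_100;  return 0;
            case SPEED_1000: *val = rsd->rgmii_1000; return 0;
            }
        } else if (interface == PHY_INTERFACE_MODE_RMII) {
            switch (speed) {
            case SPEED_10:  *val = rsd->rmii_10;  return 0;
            case SPEED_100: *val = rsd->rmii_100; return 0;
            }
        }
        return -EINVAL; /* phylink validates speed/interface pairs upstream */
    }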
+ */ + return -EINVAL; + } + } else { + /* This should never happen, as .get_interfaces() limits + * the interface modes that are supported to RGMII and/or + * RMII. + */ + return -EINVAL; + } + + regmap_write(bsp_priv->grf, reg, val); + + return 0; + +} + +static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; + long rate; + + rate = rgmii_clock(speed); + if (rate < 0) + return rate; + + return clk_set_rate(clk_mac_speed, rate); +} + #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) @@ -177,42 +243,38 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) PX30_GMAC_PHY_INTF_SEL_RMII); } -static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int px30_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; - struct device *dev = &bsp_priv->pdev->dev; - int ret; + struct device *dev = bsp_priv->dev; + unsigned int con1; + long rate; if (!clk_mac_speed) { dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__); - return; + return -EINVAL; } if (speed == 10) { - regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, - PX30_GMAC_SPEED_10M); - - ret = clk_set_rate(clk_mac_speed, 2500000); - if (ret) - dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n", - __func__, ret); + con1 = PX30_GMAC_SPEED_10M; + rate = 2500000; } else if (speed == 100) { - regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, - PX30_GMAC_SPEED_100M); - - ret = clk_set_rate(clk_mac_speed, 25000000); - if (ret) - dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n", - __func__, ret); - + con1 = PX30_GMAC_SPEED_100M; + rate = 25000000; } else { dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + return -EINVAL; } + + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1); + + return clk_set_rate(clk_mac_speed, rate); } static const struct rk_gmac_ops px30_ops = { .set_to_rmii = px30_set_to_rmii, - .set_rmii_speed = px30_set_rmii_speed, + .set_speed = px30_set_speed, }; #define RK3128_GRF_MAC_CON0 0x0168 @@ -261,45 +323,25 @@ static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv) RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE); } -static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, - RK3128_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, - RK3128_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, - RK3128_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! 
speed=%d", speed); -} +static const struct rk_reg_speed_data rk3128_reg_speed_data = { + .rgmii_10 = RK3128_GMAC_CLK_2_5M, + .rgmii_100 = RK3128_GMAC_CLK_25M, + .rgmii_1000 = RK3128_GMAC_CLK_125M, + .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M, + .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M, +}; -static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3128_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, - RK3128_GMAC_RMII_CLK_2_5M | - RK3128_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, - RK3128_GMAC_RMII_CLK_25M | - RK3128_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - } + return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data, + RK3128_GRF_MAC_CON1, interface, speed); } static const struct rk_gmac_ops rk3128_ops = { .set_to_rgmii = rk3128_set_to_rgmii, .set_to_rmii = rk3128_set_to_rmii, - .set_rgmii_speed = rk3128_set_rgmii_speed, - .set_rmii_speed = rk3128_set_rmii_speed, + .set_speed = rk3128_set_speed, }; #define RK3228_GRF_MAC_CON0 0x0900 @@ -358,37 +400,19 @@ static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv) regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11)); } -static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, - RK3228_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, - RK3228_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, - RK3228_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); -} +static const struct rk_reg_speed_data rk3228_reg_speed_data = { + .rgmii_10 = RK3228_GMAC_CLK_2_5M, + .rgmii_100 = RK3228_GMAC_CLK_25M, + .rgmii_1000 = RK3228_GMAC_CLK_125M, + .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M, + .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M, +}; -static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3228_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, - RK3228_GMAC_RMII_CLK_2_5M | - RK3228_GMAC_SPEED_10M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, - RK3228_GMAC_RMII_CLK_25M | - RK3228_GMAC_SPEED_100M); - else - dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); + return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data, + RK3228_GRF_MAC_CON1, interface, speed); } static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv) @@ -402,8 +426,7 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv) static const struct rk_gmac_ops rk3228_ops = { .set_to_rgmii = rk3228_set_to_rgmii, .set_to_rmii = rk3228_set_to_rmii, - .set_rgmii_speed = rk3228_set_rgmii_speed, - .set_rmii_speed = rk3228_set_rmii_speed, + .set_speed = rk3228_set_speed, .integrated_phy_powerup = rk3228_integrated_phy_powerup, .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown, }; @@ -454,45 +477,25 @@ static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv) RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE); } -static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, - RK3288_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, - RK3288_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, - RK3288_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); -} +static const struct rk_reg_speed_data rk3288_reg_speed_data = { + .rgmii_10 = RK3288_GMAC_CLK_2_5M, + .rgmii_100 = RK3288_GMAC_CLK_25M, + .rgmii_1000 = RK3288_GMAC_CLK_125M, + .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M, + .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M, +}; -static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3288_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, - RK3288_GMAC_RMII_CLK_2_5M | - RK3288_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, - RK3288_GMAC_RMII_CLK_25M | - RK3288_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - } + return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data, + RK3288_GRF_SOC_CON1, interface, speed); } static const struct rk_gmac_ops rk3288_ops = { .set_to_rgmii = rk3288_set_to_rgmii, .set_to_rmii = rk3288_set_to_rmii, - .set_rgmii_speed = rk3288_set_rgmii_speed, - .set_rmii_speed = rk3288_set_rmii_speed, + .set_speed = rk3288_set_speed, }; #define RK3308_GRF_MAC_CON0 0x04a0 @@ -511,24 +514,21 @@ static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv) RK3308_GMAC_PHY_INTF_SEL_RMII); } -static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; +static const struct rk_reg_speed_data rk3308_reg_speed_data = { + .rmii_10 = RK3308_GMAC_SPEED_10M, + .rmii_100 = RK3308_GMAC_SPEED_100M, +}; - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0, - RK3308_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0, - RK3308_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); - } +static int rk3308_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data, + RK3308_GRF_MAC_CON0, interface, speed); } static const struct rk_gmac_ops rk3308_ops = { .set_to_rmii = rk3308_set_to_rmii, - .set_rmii_speed = rk3308_set_rmii_speed, + .set_speed = rk3308_set_speed, }; #define RK3328_GRF_MAC_CON0 0x0900 @@ -590,41 +590,26 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) RK3328_GMAC_RMII_MODE); } -static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, - RK3328_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, - RK3328_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, - RK3328_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); -} +static const struct rk_reg_speed_data rk3328_reg_speed_data = { + .rgmii_10 = RK3328_GMAC_CLK_2_5M, + .rgmii_100 = RK3328_GMAC_CLK_25M, + .rgmii_1000 = RK3328_GMAC_CLK_125M, + .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M, + .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M, +}; -static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3328_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; unsigned int reg; - reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 : - RK3328_GRF_MAC_CON1; - - if (speed == 10) - regmap_write(bsp_priv->grf, reg, - RK3328_GMAC_RMII_CLK_2_5M | - RK3328_GMAC_SPEED_10M); - else if (speed == 100) - regmap_write(bsp_priv->grf, reg, - RK3328_GMAC_RMII_CLK_25M | - RK3328_GMAC_SPEED_100M); + if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy) + reg = RK3328_GRF_MAC_CON2; else - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + reg = RK3328_GRF_MAC_CON1; + + return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg, + interface, speed); } static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv) @@ -638,8 +623,7 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv) static const struct rk_gmac_ops rk3328_ops = { .set_to_rgmii = rk3328_set_to_rgmii, .set_to_rmii = rk3328_set_to_rmii, - .set_rgmii_speed = rk3328_set_rgmii_speed, - .set_rmii_speed = rk3328_set_rmii_speed, + .set_speed = rk3328_set_speed, .integrated_phy_powerup = rk3328_integrated_phy_powerup, .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown, }; @@ -690,45 +674,25 @@ static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv) RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE); } -static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, - RK3366_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, - RK3366_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, - RK3366_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! 
speed=%d", speed); -} +static const struct rk_reg_speed_data rk3366_reg_speed_data = { + .rgmii_10 = RK3366_GMAC_CLK_2_5M, + .rgmii_100 = RK3366_GMAC_CLK_25M, + .rgmii_1000 = RK3366_GMAC_CLK_125M, + .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M, + .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M, +}; -static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3366_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, - RK3366_GMAC_RMII_CLK_2_5M | - RK3366_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, - RK3366_GMAC_RMII_CLK_25M | - RK3366_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - } + return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data, + RK3366_GRF_SOC_CON6, interface, speed); } static const struct rk_gmac_ops rk3366_ops = { .set_to_rgmii = rk3366_set_to_rgmii, .set_to_rmii = rk3366_set_to_rmii, - .set_rgmii_speed = rk3366_set_rgmii_speed, - .set_rmii_speed = rk3366_set_rmii_speed, + .set_speed = rk3366_set_speed, }; #define RK3368_GRF_SOC_CON15 0x043c @@ -777,45 +741,25 @@ static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv) RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE); } -static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, - RK3368_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, - RK3368_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, - RK3368_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); -} +static const struct rk_reg_speed_data rk3368_reg_speed_data = { + .rgmii_10 = RK3368_GMAC_CLK_2_5M, + .rgmii_100 = RK3368_GMAC_CLK_25M, + .rgmii_1000 = RK3368_GMAC_CLK_125M, + .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M, + .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M, +}; -static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3368_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, - RK3368_GMAC_RMII_CLK_2_5M | - RK3368_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, - RK3368_GMAC_RMII_CLK_25M | - RK3368_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); - } + return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data, + RK3368_GRF_SOC_CON15, interface, speed); } static const struct rk_gmac_ops rk3368_ops = { .set_to_rgmii = rk3368_set_to_rgmii, .set_to_rmii = rk3368_set_to_rmii, - .set_rgmii_speed = rk3368_set_rgmii_speed, - .set_rmii_speed = rk3368_set_rmii_speed, + .set_speed = rk3368_set_speed, }; #define RK3399_GRF_SOC_CON5 0xc214 @@ -864,45 +808,25 @@ static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv) RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE); } -static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_CLK_2_5M); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_CLK_25M); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_CLK_125M); - else - dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); -} +static const struct rk_reg_speed_data rk3399_reg_speed_data = { + .rgmii_10 = RK3399_GMAC_CLK_2_5M, + .rgmii_100 = RK3399_GMAC_CLK_25M, + .rgmii_1000 = RK3399_GMAC_CLK_125M, + .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M, + .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M, +}; -static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3399_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - - if (speed == 10) { - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_RMII_CLK_2_5M | - RK3399_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_RMII_CLK_25M | - RK3399_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - } + return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data, + RK3399_GRF_SOC_CON5, interface, speed); } static const struct rk_gmac_ops rk3399_ops = { .set_to_rgmii = rk3399_set_to_rgmii, .set_to_rmii = rk3399_set_to_rmii, - .set_rgmii_speed = rk3399_set_rgmii_speed, - .set_rmii_speed = rk3399_set_rmii_speed, + .set_speed = rk3399_set_speed, }; #define RK3528_VO_GRF_GMAC_CON 0x0018 @@ -965,43 +889,34 @@ static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv) RK3528_GMAC0_CLK_RMII_DIV2); } -static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; +static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = { + .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20, + .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2, +}; - if (speed == 10) - regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, - RK3528_GMAC1_CLK_RGMII_DIV50); - else if (speed == 100) - regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, - RK3528_GMAC1_CLK_RGMII_DIV5); - else if (speed == 1000) - regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, - RK3528_GMAC1_CLK_RGMII_DIV1); - else - dev_err(dev, "unknown speed value for RGMII! 
speed=%d", speed); -} +static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = { + .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50, + .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5, + .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1, + .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20, + .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2, +}; -static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3528_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; - unsigned int reg, val; + const struct rk_reg_speed_data *rsd; + unsigned int reg; - if (speed == 10) - val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 : - RK3528_GMAC0_CLK_RMII_DIV20; - else if (speed == 100) - val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 : - RK3528_GMAC0_CLK_RMII_DIV2; - else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - return; + if (bsp_priv->id == 1) { + rsd = &rk3528_gmac1_reg_speed_data; + reg = RK3528_VPU_GRF_GMAC_CON5; + } else { + rsd = &rk3528_gmac0_reg_speed_data; + reg = RK3528_VO_GRF_GMAC_CON; } - reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 : - RK3528_VO_GRF_GMAC_CON; - - regmap_write(bsp_priv->grf, reg, val); + return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed); } static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv, @@ -1035,8 +950,7 @@ static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv) static const struct rk_gmac_ops rk3528_ops = { .set_to_rgmii = rk3528_set_to_rgmii, .set_to_rmii = rk3528_set_to_rmii, - .set_rgmii_speed = rk3528_set_rgmii_speed, - .set_rmii_speed = rk3528_set_rmii_speed, + .set_speed = rk3528_set_speed, .set_clock_selection = rk3528_set_clock_selection, .integrated_phy_powerup = rk3528_integrated_phy_powerup, .integrated_phy_powerdown = rk3528_integrated_phy_powerdown, @@ -1098,30 +1012,10 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv) regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII); } -static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; - struct device *dev = &bsp_priv->pdev->dev; - long rate; - int ret; - - rate = rgmii_clock(speed); - if (rate < 0) { - dev_err(dev, "unknown speed value for GMAC speed=%d", speed); - return; - } - - ret = clk_set_rate(clk_mac_speed, rate); - if (ret) - dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n", - __func__, rate, ret); -} - static const struct rk_gmac_ops rk3568_ops = { .set_to_rgmii = rk3568_set_to_rgmii, .set_to_rmii = rk3568_set_to_rmii, - .set_rgmii_speed = rk3568_set_gmac_speed, - .set_rmii_speed = rk3568_set_gmac_speed, + .set_speed = rk_set_clk_mac_speed, .regs_valid = true, .regs = { 0xfe2a0000, /* gmac0 */ @@ -1205,42 +1099,24 @@ static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv) regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE); } -static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; - unsigned int val = 0, offset_con; +static const struct rk_reg_speed_data rk3578_reg_speed_data = { + .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50, + .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5, + .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1, + .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20, + .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2, +}; - switch (speed) { - case 10: - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) - val = RK3576_GMAC_CLK_RMII_DIV20; - else - val = 
RK3576_GMAC_CLK_RGMII_DIV50; - break; - case 100: - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) - val = RK3576_GMAC_CLK_RMII_DIV2; - else - val = RK3576_GMAC_CLK_RGMII_DIV5; - break; - case 1000: - if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII) - val = RK3576_GMAC_CLK_RGMII_DIV1; - else - goto err; - break; - default: - goto err; - } +static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + unsigned int offset_con; offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : RK3576_GRF_GMAC_CON0; - regmap_write(bsp_priv->grf, offset_con, val); - - return; -err: - dev_err(dev, "unknown speed value for GMAC speed=%d", speed); + return rk_set_reg_speed(bsp_priv, &rk3578_reg_speed_data, offset_con, + interface, speed); } static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input, @@ -1262,8 +1138,7 @@ static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input static const struct rk_gmac_ops rk3576_ops = { .set_to_rgmii = rk3576_set_to_rgmii, .set_to_rmii = rk3576_set_to_rmii, - .set_rgmii_speed = rk3576_set_gmac_speed, - .set_rmii_speed = rk3576_set_gmac_speed, + .set_speed = rk3576_set_gmac_speed, .set_clock_selection = rk3576_set_clock_selection, .php_grf_required = true, .regs_valid = true, @@ -1347,26 +1222,26 @@ static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv) RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id)); } -static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) +static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) { - struct device *dev = &bsp_priv->pdev->dev; unsigned int val = 0, id = bsp_priv->id; switch (speed) { case 10: - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + if (interface == PHY_INTERFACE_MODE_RMII) val = RK3588_GMA_CLK_RMII_DIV20(id); else val = RK3588_GMAC_CLK_RGMII_DIV50(id); break; case 100: - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + if (interface == PHY_INTERFACE_MODE_RMII) val = RK3588_GMA_CLK_RMII_DIV2(id); else val = RK3588_GMAC_CLK_RGMII_DIV5(id); break; case 1000: - if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII) + if (interface != PHY_INTERFACE_MODE_RMII) val = RK3588_GMAC_CLK_RGMII_DIV1(id); else goto err; @@ -1377,9 +1252,9 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val); - return; + return 0; err: - dev_err(dev, "unknown speed value for GMAC speed=%d", speed); + return -EINVAL; } static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input, @@ -1397,8 +1272,7 @@ static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input static const struct rk_gmac_ops rk3588_ops = { .set_to_rgmii = rk3588_set_to_rgmii, .set_to_rmii = rk3588_set_to_rmii, - .set_rgmii_speed = rk3588_set_gmac_speed, - .set_rmii_speed = rk3588_set_gmac_speed, + .set_speed = rk3588_set_gmac_speed, .set_clock_selection = rk3588_set_clock_selection, .php_grf_required = true, .regs_valid = true, @@ -1427,26 +1301,21 @@ static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv) RV1108_GMAC_PHY_INTF_SEL_RMII); } -static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct device *dev = &bsp_priv->pdev->dev; +static const struct rk_reg_speed_data rv1108_reg_speed_data = { + .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M, + .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M, +}; - if (speed == 10) { - 
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, - RV1108_GMAC_RMII_CLK_2_5M | - RV1108_GMAC_SPEED_10M); - } else if (speed == 100) { - regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, - RV1108_GMAC_RMII_CLK_25M | - RV1108_GMAC_SPEED_100M); - } else { - dev_err(dev, "unknown speed value for RMII! speed=%d", speed); - } +static int rv1108_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data, + RV1108_GRF_GMAC_CON0, interface, speed); } static const struct rk_gmac_ops rv1108_ops = { .set_to_rmii = rv1108_set_to_rmii, - .set_rmii_speed = rv1108_set_rmii_speed, + .set_speed = rv1108_set_speed, }; #define RV1126_GRF_GMAC_CON0 0X0070 @@ -1501,62 +1370,17 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv) RV1126_GMAC_PHY_INTF_SEL_RMII); } -static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; - struct device *dev = &bsp_priv->pdev->dev; - long rate; - int ret; - - rate = rgmii_clock(speed); - if (rate < 0) { - dev_err(dev, "unknown speed value for RGMII speed=%d", speed); - return; - } - - ret = clk_set_rate(clk_mac_speed, rate); - if (ret) - dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n", - __func__, rate, ret); -} - -static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) -{ - struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk; - struct device *dev = &bsp_priv->pdev->dev; - unsigned long rate; - int ret; - - switch (speed) { - case 10: - rate = 2500000; - break; - case 100: - rate = 25000000; - break; - default: - dev_err(dev, "unknown speed value for RGMII speed=%d", speed); - return; - } - - ret = clk_set_rate(clk_mac_speed, rate); - if (ret) - dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n", - __func__, rate, ret); -} - static const struct rk_gmac_ops rv1126_ops = { .set_to_rgmii = rv1126_set_to_rgmii, .set_to_rmii = rv1126_set_to_rmii, - .set_rgmii_speed = rv1126_set_rgmii_speed, - .set_rmii_speed = rv1126_set_rmii_speed, + .set_speed = rk_set_clk_mac_speed, }; static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) { struct rk_priv_data *bsp_priv = plat->bsp_priv; - struct device *dev = &bsp_priv->pdev->dev; int phy_iface = bsp_priv->phy_iface; + struct device *dev = bsp_priv->dev; int i, j, ret; bsp_priv->clk_enabled = false; @@ -1583,16 +1407,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) if (ret) return dev_err_probe(dev, ret, "Failed to get clocks\n"); - /* "stmmaceth" will be enabled by the core */ - bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth"); - ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac); - if (ret) - return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n"); - if (bsp_priv->clock_input) { dev_info(dev, "clock input from PHY\n"); } else if (phy_iface == PHY_INTERFACE_MODE_RMII) { - clk_set_rate(bsp_priv->clk_mac, 50000000); + clk_set_rate(plat->stmmac_clk, 50000000); } if (plat->phy_node && bsp_priv->integrated_phy) { @@ -1648,8 +1466,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) { struct regulator *ldo = bsp_priv->regulator; + struct device *dev = bsp_priv->dev; int ret; - struct device *dev = &bsp_priv->pdev->dev; if (enable) { ret = regulator_enable(ldo); @@ -1773,7 +1591,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, dev_info(dev, "integrated PHY? 
(%s).\n", bsp_priv->integrated_phy ? "yes" : "no"); - bsp_priv->pdev = pdev; + bsp_priv->dev = dev; return bsp_priv; } @@ -1793,7 +1611,7 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv) return -EINVAL; break; default: - dev_err(&bsp_priv->pdev->dev, + dev_err(bsp_priv->dev, "unsupported interface %d", bsp_priv->phy_iface); } return 0; @@ -1801,8 +1619,8 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv) static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) { + struct device *dev = bsp_priv->dev; int ret; - struct device *dev = &bsp_priv->pdev->dev; ret = rk_gmac_check_ops(bsp_priv); if (ret) @@ -1858,35 +1676,34 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac) if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown) gmac->ops->integrated_phy_powerdown(gmac); - pm_runtime_put_sync(&gmac->pdev->dev); + pm_runtime_put_sync(gmac->dev); phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } +static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv, + unsigned long *interfaces) +{ + struct rk_priv_data *rk = bsp_priv; + + if (rk->ops->set_to_rgmii) + phy_interface_set_rgmii(interfaces); + + if (rk->ops->set_to_rmii) + __set_bit(PHY_INTERFACE_MODE_RMII, interfaces); +} + static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i, phy_interface_t interface, int speed) { struct rk_priv_data *bsp_priv = bsp_priv_; - struct device *dev = &bsp_priv->pdev->dev; - switch (bsp_priv->phy_iface) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - if (bsp_priv->ops->set_rgmii_speed) - bsp_priv->ops->set_rgmii_speed(bsp_priv, speed); - break; - case PHY_INTERFACE_MODE_RMII: - if (bsp_priv->ops->set_rmii_speed) - bsp_priv->ops->set_rmii_speed(bsp_priv, speed); - break; - default: - dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); - } + if (bsp_priv->ops->set_speed) + return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface, + speed); - return 0; + return -EINVAL; } static int rk_gmac_probe(struct platform_device *pdev) @@ -1919,6 +1736,7 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->tx_fifo_size = 2048; } + plat_dat->get_interfaces = rk_get_interfaces; plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate; plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index 5e6ac82a89b9..bd65d4239054 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -48,7 +48,6 @@ struct visconti_eth { void __iomem *reg; - u32 phy_intf_sel; struct clk *phy_ref_clk; struct device *dev; }; @@ -57,42 +56,35 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, phy_interface_t interface, int speed) { struct visconti_eth *dwmac = bsp_priv; - struct net_device *netdev = dev_get_drvdata(dwmac->dev); - unsigned int val, clk_sel_val = 0; - - switch (speed) { - case SPEED_1000: - if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII) - clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M; - break; - case SPEED_100: - if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII) - clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M; - if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII) - clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2; - break; - case SPEED_10: - if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII) - clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M; - 
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII) - clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20; - break; - default: - /* No bit control */ - netdev_err(netdev, "Unsupported speed request (%d)", speed); - return -EINVAL; - } - - /* Stop internal clock */ - val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL); - val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN); - val |= ETHER_CLK_SEL_TX_O_E_N_IN; - writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); + unsigned long clk_sel, val; + + if (phy_interface_mode_is_rgmii(interface)) { + switch (speed) { + case SPEED_1000: + clk_sel = ETHER_CLK_SEL_FREQ_SEL_125M; + break; + + case SPEED_100: + clk_sel = ETHER_CLK_SEL_FREQ_SEL_25M; + break; + + case SPEED_10: + clk_sel = ETHER_CLK_SEL_FREQ_SEL_2P5M; + break; + + default: + return -EINVAL; + } + + /* Stop internal clock */ + val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL); + val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | + ETHER_CLK_SEL_RX_TX_CLK_EN); + val |= ETHER_CLK_SEL_TX_O_E_N_IN; + writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); - /* Set Clock-Mux, Start clock, Set TX_O direction */ - switch (dwmac->phy_intf_sel) { - case ETHER_CONFIG_INTF_RGMII: - val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC; + /* Set Clock-Mux, Start clock, Set TX_O direction */ + val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); val |= ETHER_CLK_SEL_RX_TX_CLK_EN; @@ -100,11 +92,32 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, val &= ~ETHER_CLK_SEL_TX_O_E_N_IN; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); - break; - case ETHER_CONFIG_INTF_RMII: - val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV | - ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN | - ETHER_CLK_SEL_RMII_CLK_SEL_RX_C; + } else if (interface == PHY_INTERFACE_MODE_RMII) { + switch (speed) { + case SPEED_100: + clk_sel = ETHER_CLK_SEL_DIV_SEL_2; + break; + + case SPEED_10: + clk_sel = ETHER_CLK_SEL_DIV_SEL_20; + break; + + default: + return -EINVAL; + } + + /* Stop internal clock */ + val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL); + val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | + ETHER_CLK_SEL_RX_TX_CLK_EN); + val |= ETHER_CLK_SEL_TX_O_E_N_IN; + writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); + + /* Set Clock-Mux, Start clock, Set TX_O direction */ + val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV | + ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | + ETHER_CLK_SEL_TX_O_E_N_IN | + ETHER_CLK_SEL_RMII_CLK_SEL_RX_C; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); val |= ETHER_CLK_SEL_RMII_CLK_RST; @@ -112,16 +125,22 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); - break; - case ETHER_CONFIG_INTF_MII: - default: - val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC | - ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN; + } else { + /* Stop internal clock */ + val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL); + val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | + ETHER_CLK_SEL_RX_TX_CLK_EN); + val |= ETHER_CLK_SEL_TX_O_E_N_IN; + writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); + + /* Set Clock-Mux, Start clock, Set TX_O direction */ + val = ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC | + ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | + ETHER_CLK_SEL_TX_O_E_N_IN; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); val |= ETHER_CLK_SEL_RX_TX_CLK_EN; writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL); - break; } return 0; @@ -130,28 +149,28 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, 
struct clk *clk_tx_i, static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat) { struct visconti_eth *dwmac = plat_dat->bsp_priv; - unsigned int reg_val, clk_sel_val; + unsigned int clk_sel_val; + u32 phy_intf_sel; switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII; + phy_intf_sel = ETHER_CONFIG_INTF_RGMII; break; case PHY_INTERFACE_MODE_MII: - dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII; + phy_intf_sel = ETHER_CONFIG_INTF_MII; break; case PHY_INTERFACE_MODE_RMII: - dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII; + phy_intf_sel = ETHER_CONFIG_INTF_RMII; break; default: dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface); return -EOPNOTSUPP; } - reg_val = dwmac->phy_intf_sel; - writel(reg_val, dwmac->reg + REG_ETHER_CONTROL); + writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL); /* Enable TX/RX clock */ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M; @@ -161,8 +180,8 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac dwmac->reg + REG_ETHER_CLOCK_SEL); /* release internal-reset */ - reg_val |= ETHER_ETH_CONTROL_RESET; - writel(reg_val, dwmac->reg + REG_ETHER_CONTROL); + phy_intf_sel |= ETHER_ETH_CONTROL_RESET; + writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 56b76aaa58f0..fe776ddf6889 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -393,15 +393,10 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) writel(value, ioaddr + LPI_TIMER_CTRL); } -static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, - bool loopback) +static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane, + bool srgmi_ral, bool loopback) { - dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); -} - -static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) -{ - dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -508,7 +503,6 @@ const struct stmmac_ops dwmac1000_ops = { .set_eee_pls = dwmac1000_set_eee_pls, .debug = dwmac1000_debug, .pcs_ctrl_ane = dwmac1000_ctrl_ane, - .pcs_get_adv_lp = dwmac1000_get_adv_lp, .set_mac_loopback = dwmac1000_set_mac_loopback, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 9c2549d4100f..d85bc0bb5c3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -583,15 +583,10 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } } -static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, +static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral, bool loopback) { - dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); -} - -static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) -{ - dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } /* RGMII or SMII interface */ @@ -958,7 
+953,6 @@ const struct stmmac_ops dwmac4_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .set_mac_loopback = dwmac4_set_mac_loopback, @@ -993,7 +987,6 @@ const struct stmmac_ops dwmac410_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .flex_pps_config = dwmac5_flex_pps_config, @@ -1030,7 +1023,6 @@ const struct stmmac_ops dwmac510_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .safety_feat_config = dwmac5_safety_feat_config, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index ae4efffb785f..14dbe0685997 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -300,7 +300,6 @@ struct stmmac_dma_ops { struct mac_device_info; struct net_device; -struct rgmii_adv; struct stmmac_tc_entry; struct stmmac_pps_cfg; struct stmmac_rss; @@ -375,9 +374,8 @@ struct stmmac_ops { struct stmmac_extra_stats *x, u32 rx_queues, u32 tx_queues); /* PCS calls */ - void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, bool srgmi_ral, bool loopback); - void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); /* Safety Features */ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_feature_cfg *safety_cfg); @@ -466,9 +464,7 @@ struct stmmac_ops { #define stmmac_mac_debug(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, debug, __priv, __args) #define stmmac_pcs_ctrl_ane(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) -#define stmmac_pcs_get_adv_lp(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __priv, __args) #define stmmac_safety_feat_config(__priv, __args...) \ stmmac_do_callback(__priv, mac, safety_feat_config, __args) #define stmmac_safety_feat_irq_status(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index f702f7b7bf9f..77758a7299b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -325,7 +325,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) && (priv->hw->pcs & STMMAC_PCS_RGMII || priv->hw->pcs & STMMAC_PCS_SGMII)) { - struct rgmii_adv adv; u32 supported, advertising, lp_advertising; if (!priv->xstats.pcs_link) { @@ -337,10 +336,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, cmd->base.speed = priv->xstats.pcs_speed; - /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) - return -EOPNOTSUPP; /* should never happen indeed */ - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ ethtool_convert_link_mode_to_legacy_u32( @@ -350,44 +345,12 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, ethtool_convert_link_mode_to_legacy_u32( &lp_advertising, cmd->link_modes.lp_advertising); - if (adv.pause & STMMAC_PCS_PAUSE) - advertising |= ADVERTISED_Pause; - if (adv.pause & STMMAC_PCS_ASYM_PAUSE) - advertising |= ADVERTISED_Asym_Pause; - if (adv.lp_pause & STMMAC_PCS_PAUSE) - lp_advertising |= ADVERTISED_Pause; - if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) - lp_advertising |= ADVERTISED_Asym_Pause; - /* Reg49[3] always set because ANE is always supported */ cmd->base.autoneg = ADVERTISED_Autoneg; supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; lp_advertising |= ADVERTISED_Autoneg; - if (adv.duplex) { - supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full | - SUPPORTED_10baseT_Full); - advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); - } else { - supported |= (SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); - advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); - } - if (adv.lp_duplex) - lp_advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); - else - lp_advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); cmd->base.port = PORT_OTHER; ethtool_convert_legacy_u32_to_link_mode( @@ -417,7 +380,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, return -EINVAL; mutex_lock(&priv->lock); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); mutex_unlock(&priv->lock); return 0; @@ -515,12 +478,9 @@ stmmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); - struct rgmii_adv adv_lp; - if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + if (priv->hw->pcs) { pause->autoneg = 1; - if (!adv_lp.pause) - return; } else { phylink_ethtool_get_pauseparam(priv->phylink, pause); } @@ -531,12 +491,9 @@ stmmac_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); - struct rgmii_adv adv_lp; - if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + if (priv->hw->pcs) { pause->autoneg = 1; - if (!adv_lp.pause) - return -EOPNOTSUPP; return 0; } else { return phylink_ethtool_set_pauseparam(priv->phylink, pause); diff 
--git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b948df1bff9a..f350a6662880 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1062,8 +1062,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, interface, speed); if (ret < 0) netdev_err(priv->dev, - "failed to configure transmit clock for %dMbps: %pe\n", - speed, ERR_PTR(ret)); + "failed to configure %s transmit clock for %dMbps: %pe\n", + phy_modes(interface), speed, ERR_PTR(ret)); } stmmac_mac_set(priv, priv->ioaddr, true); @@ -3586,7 +3586,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) } if (priv->hw->pcs) - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); /* set TX and RX rings length */ stmmac_set_rings_length(priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index 1bdf87b237c4..4a684c97dfae 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -16,6 +16,8 @@ /* PCS registers (AN/TBI/SGMII/RGMII) offsets */ #define GMAC_AN_CTRL(x) (x) /* AN control */ #define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */ + +/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */ #define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */ #define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */ #define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */ @@ -107,34 +109,4 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, writel(value, ioaddr + GMAC_AN_CTRL(reg)); } - -/** - * dwmac_get_adv_lp - Get ADV and LP cap - * @ioaddr: IO registers pointer - * @reg: Base address of the AN Control Register. - * @adv_lp: structure to store the adv,lp status - * Description: this is to expose the ANE advertisement and Link partner ability - * status to ethtool support.
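The removal below is the payoff of the new comment above: the ANE ADV/LPA/EXP registers only exist for the TBI/RTBI interfaces, so deriving pause and duplex from them on SGMII/RGMII setups reported meaningless data. With dwmac_get_adv_lp() and struct rgmii_adv gone, the ethtool paths shown earlier report only autoneg for the PCS case and defer real pause handling to phylink. A sketch of the reduced pauseparam logic, assuming the stmmac_ethtool.c hunks above:

    static void example_get_pauseparam(struct net_device *netdev,
                                       struct ethtool_pauseparam *pause)
    {
        struct stmmac_priv *priv = netdev_priv(netdev);

        if (priv->hw->pcs)
            pause->autoneg = 1; /* Reg49[3]: ANE always supported */
        else
            phylink_ethtool_get_pauseparam(priv->phylink, pause);
    }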
- */ -static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg, - struct rgmii_adv *adv_lp) -{ - u32 value = readl(ioaddr + GMAC_ANE_ADV(reg)); - - if (value & GMAC_ANE_FD) - adv_lp->duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv_lp->duplex |= DUPLEX_HALF; - - adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; - - value = readl(ioaddr + GMAC_ANE_LPA(reg)); - - if (value & GMAC_ANE_FD) - adv_lp->lp_duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv_lp->lp_duplex = DUPLEX_HALF; - - adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; -} #endif /* __STMMAC_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index b80c1efdb323..4164b3a580d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -579,6 +579,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->pmt = 1; if (of_property_read_bool(np, "snps,tso")) plat->flags |= STMMAC_FLAG_TSO_EN; + of_property_read_u32(np, "snps,multicast-filter-bins", + &plat->multicast_filter_bins); } dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index ddca8fc7883e..75d7e10944d4 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -7077,8 +7077,10 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) } -static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) +static int niu_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *nfc) { + struct niu *np = netdev_priv(dev); u64 class; nfc->data = 0; @@ -7290,9 +7292,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int ret = 0; switch (cmd->cmd) { - case ETHTOOL_GRXFH: - ret = niu_get_hash_opts(np, cmd); - break; case ETHTOOL_GRXRINGS: cmd->data = np->num_rx_rings; break; @@ -7313,8 +7312,11 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) +static int niu_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { + struct niu *np = netdev_priv(dev); u64 class; u64 flow_key = 0; unsigned long flags; @@ -7656,9 +7658,6 @@ static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) int ret = 0; switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = niu_set_hash_opts(np, cmd); - break; case ETHTOOL_SRXCLSRLINS: ret = niu_add_ethtool_tcam_entry(np, cmd); break; @@ -7912,6 +7911,8 @@ static const struct ethtool_ops niu_ethtool_ops = { .set_phys_id = niu_set_phys_id, .get_rxnfc = niu_get_nfc, .set_rxnfc = niu_set_nfc, + .get_rxfh_fields = niu_get_rxfh_fields, + .set_rxfh_fields = niu_set_rxfh_fields, .get_link_ksettings = niu_get_link_ksettings, .set_link_ksettings = niu_set_link_ksettings, }; diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 5b8fdb882172..12f25cec6255 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -98,20 +98,11 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, { struct cppi5_host_desc_t *first_desc, *next_desc; dma_addr_t buf_dma, next_desc_dma; - struct prueth_swdata *swdata; - struct page *page; u32 buf_dma_len; first_desc = desc; next_desc = first_desc; - swdata = cppi5_hdesc_get_swdata(desc); 
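These icssg hunks delete the PRUETH_SWDATA_PAGE special case from the XDP transmit path: emac_xmit_xdp_frame() below now always stores the xdp_frame in swdata, so descriptor teardown needs no page-specific recycling. A sketch of the completion-side handling this relies on (inferred from the driver's TX-completion pattern; this block is illustrative, not part of the hunk):

    swdata = cppi5_hdesc_get_swdata(desc);
    switch (swdata->type) {
    case PRUETH_SWDATA_SKB:
        napi_consume_skb(swdata->data.skb, budget);
        break;
    case PRUETH_SWDATA_XDPF:
        /* xdp_return_frame() recycles page_pool-backed frames itself */
        xdp_return_frame(swdata->data.xdpf);
        break;
    default:
        break;
    }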
- if (swdata->type == PRUETH_SWDATA_PAGE) { - page = swdata->data.page; - page_pool_recycle_direct(page->pp, swdata->data.page); - goto free_desc; - } - cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); @@ -135,7 +126,6 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); } -free_desc: k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } EXPORT_SYMBOL_GPL(prueth_xmit_free); @@ -612,13 +602,8 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len); swdata = cppi5_hdesc_get_swdata(first_desc); - if (page) { - swdata->type = PRUETH_SWDATA_PAGE; - swdata->data.page = page; - } else { - swdata->type = PRUETH_SWDATA_XDPF; - swdata->data.xdpf = xdpf; - } + swdata->type = PRUETH_SWDATA_XDPF; + swdata->data.xdpf = xdpf; /* Report BQL before sending the packet */ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 86fc1278127c..a1e013b0a0eb 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -125,45 +125,6 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static struct icssg_firmwares icssg_hsr_firmwares[] = { - { - .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf", - }, - { - .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf", - } -}; - -static struct icssg_firmwares icssg_switch_firmwares[] = { - { - .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf", - }, - { - .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf", - } -}; - -static struct icssg_firmwares icssg_emac_firmwares[] = { - { - .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf", - }, - { - .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf", - .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf", - .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf", - } -}; - static int prueth_start(struct rproc *rproc, const char *fw_name) { int ret; @@ -186,11 +147,11 @@ static int prueth_emac_start(struct prueth *prueth) int ret, slice; if (prueth->is_switch_mode) - firmwares = icssg_switch_firmwares; + firmwares = prueth->icssg_switch_firmwares; else if (prueth->is_hsr_offload_mode) - firmwares = icssg_hsr_firmwares; + firmwares = prueth->icssg_hsr_firmwares; else - firmwares = icssg_emac_firmwares; + firmwares = prueth->icssg_emac_firmwares; for (slice = 0; slice < PRUETH_NUM_MACS; slice++) { ret = prueth_start(prueth->pru[slice], firmwares[slice].pru); @@ -1632,6 +1593,87 @@ static void prueth_unregister_notifiers(struct prueth *prueth) unregister_netdevice_notifier(&prueth->prueth_netdevice_nb); } +static void icssg_read_firmware_names(struct device_node *np, + struct icssg_firmwares *fw) +{ + int i; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + of_property_read_string_index(np, "firmware-name", i * 3 + 0, + 
&fw[i].pru); + of_property_read_string_index(np, "firmware-name", i * 3 + 1, + &fw[i].rtu); + of_property_read_string_index(np, "firmware-name", i * 3 + 2, + &fw[i].txpru); + } +} + +/* icssg_firmware_name_replace - Replace a substring in firmware name + * @dev: device pointer for memory allocation + * @src: source firmware name string + * @from: substring to replace + * @to: replacement substring + * + * Return: a newly allocated string with the replacement, or the original + * string if replacement is not possible. + */ +static const char *icssg_firmware_name_replace(struct device *dev, + const char *src, + const char *from, + const char *to) +{ + size_t prefix, from_len, to_len, total; + const char *p = strstr(src, from); + char *buf; + + if (!p) + return src; /* fallback: no replacement, use original */ + + prefix = p - src; + from_len = strlen(from); + to_len = strlen(to); + total = strlen(src) - from_len + to_len + 1; + + buf = devm_kzalloc(dev, total, GFP_KERNEL); + if (!buf) + return src; /* fallback: allocation failed, use original */ + + strscpy(buf, src, prefix + 1); + strscpy(buf + prefix, to, to_len + 1); + strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len); + + return buf; +} + +/** + * icssg_mode_firmware_names - Generate firmware names for a specific mode + * @dev: device pointer for logging and context + * @src: source array of firmware name structures + * @dst: destination array to store updated firmware name structures + * @from: substring in firmware names to be replaced + * @to: substring to replace @from in firmware names + * + * Iterates over all MACs and replaces occurrences of the @from substring + * with @to in the firmware names (pru, rtu, txpru) for each MAC. The + * updated firmware names are stored in the @dst array. 
+ */ +static void icssg_mode_firmware_names(struct device *dev, + struct icssg_firmwares *src, + struct icssg_firmwares *dst, + const char *from, const char *to) +{ + int i; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru, + from, to); + dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu, + from, to); + dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru, + from, to); + } +} + static int prueth_probe(struct platform_device *pdev) { struct device_node *eth_node, *eth_ports_node; @@ -1808,6 +1850,15 @@ static int prueth_probe(struct platform_device *pdev) icss_iep_init_fw(prueth->iep1); } + /* Read EMAC firmware names from device tree */ + icssg_read_firmware_names(np, prueth->icssg_emac_firmwares); + + /* Generate other mode firmware names based on EMAC firmware names */ + icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares, + prueth->icssg_switch_firmwares, "eth", "sw"); + icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares, + prueth->icssg_hsr_firmwares, "eth", "hsr"); + spin_lock_init(&prueth->vtbl_lock); spin_lock_init(&prueth->stats_lock); /* setup netdev interfaces */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 23c465f1ce7f..c03e3b3626c1 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -259,9 +259,9 @@ struct prueth_pdata { }; struct icssg_firmwares { - char *pru; - char *rtu; - char *txpru; + const char *pru; + const char *rtu; + const char *txpru; }; /** @@ -300,6 +300,9 @@ struct icssg_firmwares { * @is_switchmode_supported: indicates platform support for switch mode * @switch_id: ID for mapping switch ports to bridge * @default_vlan: Default VLAN for host + * @icssg_emac_firmwares: Firmware names for EMAC mode, indexed per MAC + * @icssg_switch_firmwares: Firmware names for SWITCH mode, indexed per MAC + * @icssg_hsr_firmwares: Firmware names for HSR mode, indexed per MAC */ struct prueth { struct device *dev; @@ -343,6 +346,9 @@ struct prueth { spinlock_t vtbl_lock; /** @stats_lock: Lock for reading icssg stats */ spinlock_t stats_lock; + struct icssg_firmwares icssg_emac_firmwares[PRUETH_NUM_MACS]; + struct icssg_firmwares icssg_switch_firmwares[PRUETH_NUM_MACS]; + struct icssg_firmwares icssg_hsr_firmwares[PRUETH_NUM_MACS]; }; struct emac_tx_ts_response { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ffc15a432689..54384f9b3872 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -41,6 +41,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); /* per-network namespace private data for this module */ struct geneve_net { struct list_head geneve_list; + /* sock_list is protected by rtnl lock */ struct list_head sock_list; }; @@ -921,8 +922,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), - !test_bit(IP_TUNNEL_CSUM_BIT, - info->key.tun_flags)); + !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags), + 0); return 0; } @@ -1014,7 +1015,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, !test_bit(IP_TUNNEL_CSUM_BIT, - info->key.tun_flags)); + info->key.tun_flags), + 0); return 0; } #endif @@ -1179,8 +1181,9 @@ static 
void geneve_offload_rx_ports(struct net_device *dev, bool push) struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; - rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) { + ASSERT_RTNL(); + + list_for_each_entry(gs, &gn->sock_list, list) { if (push) { udp_tunnel_push_rx_port(dev, gs->sock, UDP_TUNNEL_TYPE_GENEVE); @@ -1189,7 +1192,6 @@ static void geneve_offload_rx_ports(struct net_device *dev, bool push) UDP_TUNNEL_TYPE_GENEVE); } } - rcu_read_unlock(); } /* Initialize the device structure. */ diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index d4dec741c7f4..4b668ebaa0f7 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -446,7 +446,8 @@ static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb) htons(GTP0_PORT), htons(GTP0_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), - false); + false, + 0); return 0; } @@ -704,7 +705,8 @@ static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) htons(GTP1U_PORT), htons(GTP1U_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), - false); + false, + 0); return 0; } @@ -1304,7 +1306,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) pktinfo.gtph_port, pktinfo.gtph_port, !net_eq(sock_net(pktinfo.pctx->sk), dev_net(dev)), - false); + false, 0); break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) @@ -1314,7 +1316,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) ip6_dst_hoplimit(&pktinfo.rt->dst), 0, pktinfo.gtph_port, pktinfo.gtph_port, - false); + false, 0); #else goto tx_err; #endif @@ -2405,7 +2407,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info) port, port, !net_eq(sock_net(sk), dev_net(gtp->dev)), - false); + false, 0); return 0; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c41a025c66f0..42d98e99566e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1580,9 +1580,10 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static int -netvsc_get_rss_hash_opts(struct net_device_context *ndc, - struct ethtool_rxnfc *info) +netvsc_get_rxfh_fields(struct net_device *ndev, + struct ethtool_rxfh_fields *info) { + struct net_device_context *ndc = netdev_priv(ndev); const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3; info->data = RXH_IP_SRC | RXH_IP_DST; @@ -1637,16 +1638,17 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = nvdev->num_chn; return 0; - - case ETHTOOL_GRXFH: - return netvsc_get_rss_hash_opts(ndc, info); } return -EOPNOTSUPP; } -static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, - struct ethtool_rxnfc *info) +static int +netvsc_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) { + struct net_device_context *ndc = netdev_priv(dev); + if (info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) { switch (info->flow_type) { @@ -1701,17 +1703,6 @@ static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, return -EOPNOTSUPP; } -static int -netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) -{ - struct net_device_context *ndc = netdev_priv(ndev); - - if (info->cmd == ETHTOOL_SRXFH) - return netvsc_set_rss_hash_opts(ndc, info); - - return -EOPNOTSUPP; -} - static u32 netvsc_get_rxfh_key_size(struct net_device *dev) { return NETVSC_HASH_KEYLEN; @@ -1979,11 +1970,12 @@ static 
const struct ethtool_ops ethtool_ops = { .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = netvsc_get_rxnfc, - .set_rxnfc = netvsc_set_rxnfc, .get_rxfh_key_size = netvsc_get_rxfh_key_size, .get_rxfh_indir_size = netvsc_rss_indir_size, .get_rxfh = netvsc_get_rxfh, .set_rxfh = netvsc_set_rxfh, + .get_rxfh_fields = netvsc_get_rxfh_fields, + .set_rxfh_fields = netvsc_set_rxfh_fields, .get_link_ksettings = netvsc_get_link_ksettings, .set_link_ksettings = netvsc_set_link_ksettings, .get_ringparam = netvsc_get_ringparam, diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index aea0f0357568..9b41d4697a40 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -18,7 +18,8 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors"); static struct pse_control * -fwnode_find_pse_control(struct fwnode_handle *fwnode) +fwnode_find_pse_control(struct fwnode_handle *fwnode, + struct phy_device *phydev) { struct pse_control *psec; struct device_node *np; @@ -30,7 +31,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode) if (!np) return NULL; - psec = of_pse_control_get(np); + psec = of_pse_control_get(np, phydev); if (PTR_ERR(psec) == -ENOENT) return NULL; @@ -128,15 +129,9 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, u32 phy_id; int rc; - psec = fwnode_find_pse_control(child); - if (IS_ERR(psec)) - return PTR_ERR(psec); - mii_ts = fwnode_find_mii_timestamper(child); - if (IS_ERR(mii_ts)) { - rc = PTR_ERR(mii_ts); - goto clean_pse; - } + if (IS_ERR(mii_ts)) + return PTR_ERR(mii_ts); is_c45 = fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"); if (is_c45 || fwnode_get_phy_id(child, &phy_id)) @@ -169,6 +164,12 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, goto clean_phy; } + psec = fwnode_find_pse_control(child, phy); + if (IS_ERR(psec)) { + rc = PTR_ERR(psec); + goto unregister_phy; + } + phy->psec = psec; /* phy->mii_ts may already be defined by the PHY driver. A @@ -180,12 +181,13 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus, return 0; +unregister_phy: + if (is_acpi_node(child) || is_of_node(child)) + phy_device_remove(phy); clean_phy: phy_device_free(phy); clean_mii_ts: unregister_mii_timestamper(mii_ts); -clean_pse: - pse_control_put(psec); return rc; } diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index ef77bd1abae9..fefa40ea5227 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -30,8 +30,7 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, values[0] = desired_child; - gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, - s->gpios->info, values); + gpiod_multi_set_value_cansleep(s->gpios, values); return 0; } diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 176935a8645f..e3722de08ea9 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -86,10 +86,10 @@ static DEFINE_SPINLOCK(target_list_lock); static DEFINE_MUTEX(target_cleanup_list_lock); /* - * Console driver for extended netconsoles. Registered on the first use to - * avoid unnecessarily enabling ext message formatting. + * Console driver for netconsoles. Register only consoles that have + * an associated target of the same type. 
*/ -static struct console netconsole_ext; +static struct console netconsole_ext, netconsole; struct netconsole_target_stats { u64_stats_t xmit_drop_count; @@ -97,6 +97,11 @@ struct netconsole_target_stats { struct u64_stats_sync syncp; }; +enum console_type { + CONS_BASIC = BIT(0), + CONS_EXTENDED = BIT(1), +}; + /* Features enabled in sysdata. Contrary to userdata, this data is populated by * the kernel. The fields are designed as bitwise flags, allowing multiple * features to be set in sysdata_fields. @@ -108,6 +113,8 @@ enum sysdata_feature { SYSDATA_TASKNAME = BIT(1), /* Kernel release/version as part of sysdata */ SYSDATA_RELEASE = BIT(2), + /* Include a per-target message ID as part of sysdata */ + SYSDATA_MSGID = BIT(3), }; /** @@ -118,6 +125,7 @@ enum sysdata_feature { * @extradata_complete: Cached, formatted string of append * @userdata_length: String length of usedata in extradata_complete. * @sysdata_fields: Sysdata features enabled. + * @msgcounter: Message sent counter. * @stats: Packet send stats for the target. Used for debugging. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). @@ -148,6 +156,8 @@ struct netconsole_target { size_t userdata_length; /* bit-wise with sysdata_feature bits */ u32 sysdata_fields; + /* protected by target_list_lock */ + u32 msgcounter; #endif struct netconsole_target_stats stats; bool enabled; @@ -273,6 +283,23 @@ static void netconsole_process_cleanups_core(void) mutex_unlock(&target_cleanup_list_lock); } +static void netconsole_print_banner(struct netpoll *np) +{ + np_info(np, "local port %d\n", np->local_port); + if (np->ipv6) + np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6); + else + np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip); + np_info(np, "interface name '%s'\n", np->dev_name); + np_info(np, "local ethernet address '%pM'\n", np->dev_mac); + np_info(np, "remote port %d\n", np->remote_port); + if (np->ipv6) + np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6); + else + np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip); + np_info(np, "remote ethernet address %pM\n", np->remote_mac); +} + #ifdef CONFIG_NETCONSOLE_DYNAMIC /* @@ -455,6 +482,46 @@ static ssize_t sysdata_release_enabled_show(struct config_item *item, return sysfs_emit(buf, "%d\n", release_enabled); } +/* Iterate in the list of target, and make sure we don't have any console + * register without targets of the same type + */ +static void unregister_netcons_consoles(void) +{ + struct netconsole_target *nt; + u32 console_type_needed = 0; + unsigned long flags; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + if (nt->extended) + console_type_needed |= CONS_EXTENDED; + else + console_type_needed |= CONS_BASIC; + } + spin_unlock_irqrestore(&target_list_lock, flags); + + if (!(console_type_needed & CONS_EXTENDED) && + console_is_registered(&netconsole_ext)) + unregister_console(&netconsole_ext); + + if (!(console_type_needed & CONS_BASIC) && + console_is_registered(&netconsole)) + unregister_console(&netconsole); +} + +static ssize_t sysdata_msgid_enabled_show(struct config_item *item, + char *buf) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool msgid_enabled; + + mutex_lock(&dynamic_netconsole_mutex); + msgid_enabled = !!(nt->sysdata_fields & SYSDATA_MSGID); + mutex_unlock(&dynamic_netconsole_mutex); + + return sysfs_emit(buf, "%d\n", msgid_enabled); +} + /* * This one is special -- targets created through the configfs 
interface * are not enabled (and the corresponding netpoll activated) by default. @@ -488,14 +555,24 @@ static ssize_t enabled_store(struct config_item *item, goto out_unlock; } - if (nt->extended && !console_is_registered(&netconsole_ext)) + if (nt->extended && !console_is_registered(&netconsole_ext)) { + netconsole_ext.flags |= CON_ENABLED; register_console(&netconsole_ext); + } + + /* User might be enabling the basic format target for the very + * first time, make sure the console is registered. + */ + if (!nt->extended && !console_is_registered(&netconsole)) { + netconsole.flags |= CON_ENABLED; + register_console(&netconsole); + } /* - * Skip netpoll_parse_options() -- all the attributes are + * Skip netconsole_parser_cmdline() -- all the attributes are * already configured via configfs. Just print them out. */ - netpoll_print_options(&nt->np); + netconsole_print_banner(&nt->np); ret = netpoll_setup(&nt->np); if (ret) @@ -517,6 +594,10 @@ static ssize_t enabled_store(struct config_item *item, list_move(&nt->list, &target_cleanup_list); spin_unlock_irqrestore(&target_list_lock, flags); mutex_unlock(&target_cleanup_list_lock); + /* Unregister consoles, whose the last target of that type got + * disabled. + */ + unregister_netcons_consoles(); } ret = strnlen(buf, count); @@ -736,6 +817,8 @@ static size_t count_extradata_entries(struct netconsole_target *nt) entries += 1; if (nt->sysdata_fields & SYSDATA_RELEASE) entries += 1; + if (nt->sysdata_fields & SYSDATA_MSGID) + entries += 1; return entries; } @@ -872,6 +955,40 @@ static void disable_sysdata_feature(struct netconsole_target *nt, nt->extradata_complete[nt->userdata_length] = 0; } +static ssize_t sysdata_msgid_enabled_store(struct config_item *item, + const char *buf, size_t count) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool msgid_enabled, curr; + ssize_t ret; + + ret = kstrtobool(buf, &msgid_enabled); + if (ret) + return ret; + + mutex_lock(&dynamic_netconsole_mutex); + curr = !!(nt->sysdata_fields & SYSDATA_MSGID); + if (msgid_enabled == curr) + goto unlock_ok; + + if (msgid_enabled && + count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { + ret = -ENOSPC; + goto unlock; + } + + if (msgid_enabled) + nt->sysdata_fields |= SYSDATA_MSGID; + else + disable_sysdata_feature(nt, SYSDATA_MSGID); + +unlock_ok: + ret = strnlen(buf, count); +unlock: + mutex_unlock(&dynamic_netconsole_mutex); + return ret; +} + static ssize_t sysdata_release_enabled_store(struct config_item *item, const char *buf, size_t count) { @@ -987,6 +1104,7 @@ CONFIGFS_ATTR(userdatum_, value); CONFIGFS_ATTR(sysdata_, cpu_nr_enabled); CONFIGFS_ATTR(sysdata_, taskname_enabled); CONFIGFS_ATTR(sysdata_, release_enabled); +CONFIGFS_ATTR(sysdata_, msgid_enabled); static struct configfs_attribute *userdatum_attrs[] = { &userdatum_attr_value, @@ -1049,6 +1167,7 @@ static struct configfs_attribute *userdata_attrs[] = { &sysdata_attr_cpu_nr_enabled, &sysdata_attr_taskname_enabled, &sysdata_attr_release_enabled, + &sysdata_attr_msgid_enabled, NULL, }; @@ -1246,6 +1365,14 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset) init_utsname()->release); } +static int sysdata_append_msgid(struct netconsole_target *nt, int offset) +{ + wrapping_assign_add(nt->msgcounter, 1); + return scnprintf(&nt->extradata_complete[offset], + MAX_EXTRADATA_ENTRY_LEN, " msgid=%u\n", + nt->msgcounter); +} + /* * prepare_extradata - append sysdata at extradata_complete in runtime * @nt: target to send message to @@ -1268,6 +1395,8 @@ static int 
prepare_extradata(struct netconsole_target *nt) extradata_len += sysdata_append_taskname(nt, extradata_len); if (nt->sysdata_fields & SYSDATA_RELEASE) extradata_len += sysdata_append_release(nt, extradata_len); + if (nt->sysdata_fields & SYSDATA_MSGID) + extradata_len += sysdata_append_msgid(nt, extradata_len); WARN_ON_ONCE(extradata_len > MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS); @@ -1613,6 +1742,120 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) spin_unlock_irqrestore(&target_list_lock, flags); } +static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr) +{ + const char *end; + + if (!strchr(str, ':') && + in4_pton(str, -1, (void *)addr, -1, &end) > 0) { + if (!*end) + return 0; + } + if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) { +#if IS_ENABLED(CONFIG_IPV6) + if (!*end) + return 1; +#else + return -1; +#endif + } + return -1; +} + +static int netconsole_parser_cmdline(struct netpoll *np, char *opt) +{ + bool ipversion_set = false; + char *cur = opt; + char *delim; + int ipv6; + + if (*cur != '@') { + delim = strchr(cur, '@'); + if (!delim) + goto parse_failed; + *delim = 0; + if (kstrtou16(cur, 10, &np->local_port)) + goto parse_failed; + cur = delim; + } + cur++; + + if (*cur != '/') { + ipversion_set = true; + delim = strchr(cur, '/'); + if (!delim) + goto parse_failed; + *delim = 0; + ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip); + if (ipv6 < 0) + goto parse_failed; + else + np->ipv6 = (bool)ipv6; + cur = delim; + } + cur++; + + if (*cur != ',') { + /* parse out dev_name or dev_mac */ + delim = strchr(cur, ','); + if (!delim) + goto parse_failed; + *delim = 0; + + np->dev_name[0] = '\0'; + eth_broadcast_addr(np->dev_mac); + if (!strchr(cur, ':')) + strscpy(np->dev_name, cur, sizeof(np->dev_name)); + else if (!mac_pton(cur, np->dev_mac)) + goto parse_failed; + + cur = delim; + } + cur++; + + if (*cur != '@') { + /* dst port */ + delim = strchr(cur, '@'); + if (!delim) + goto parse_failed; + *delim = 0; + if (*cur == ' ' || *cur == '\t') + np_info(np, "warning: whitespace is not allowed\n"); + if (kstrtou16(cur, 10, &np->remote_port)) + goto parse_failed; + cur = delim; + } + cur++; + + /* dst ip */ + delim = strchr(cur, '/'); + if (!delim) + goto parse_failed; + *delim = 0; + ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); + if (ipv6 < 0) + goto parse_failed; + else if (ipversion_set && np->ipv6 != (bool)ipv6) + goto parse_failed; + else + np->ipv6 = (bool)ipv6; + cur = delim + 1; + + if (*cur != 0) { + /* MAC address */ + if (!mac_pton(cur, np->remote_mac)) + goto parse_failed; + } + + netconsole_print_banner(np); + + return 0; + + parse_failed: + np_info(np, "couldn't parse config at '%s'!\n", cur); + return -1; +} + /* Allocate new target (from boot/module param) and setup netpoll for it */ static struct netconsole_target *alloc_param_target(char *target_config, int cmdline_count) @@ -1642,7 +1885,7 @@ static struct netconsole_target *alloc_param_target(char *target_config, } /* Parse parameters and setup netpoll */ - err = netpoll_parse_options(&nt->np, target_config); + err = netconsole_parser_cmdline(&nt->np, target_config); if (err) goto fail; @@ -1690,8 +1933,8 @@ static int __init init_netconsole(void) { int err; struct netconsole_target *nt, *tmp; + u32 console_type_needed = 0; unsigned int count = 0; - bool extended = false; unsigned long flags; char *target_config; char *input = config; @@ -1707,9 +1950,10 @@ static int __init init_netconsole(void) } /* Dump existing printks when we register */ if 
(nt->extended) { - extended = true; + console_type_needed |= CONS_EXTENDED; netconsole_ext.flags |= CON_PRINTBUFFER; } else { + console_type_needed |= CONS_BASIC; netconsole.flags |= CON_PRINTBUFFER; } @@ -1728,9 +1972,10 @@ static int __init init_netconsole(void) if (err) goto undonotifier; - if (extended) + if (console_type_needed & CONS_EXTENDED) register_console(&netconsole_ext); - register_console(&netconsole); + if (console_type_needed & CONS_BASIC) + register_console(&netconsole); pr_info("network logging started\n"); return err; @@ -1760,7 +2005,8 @@ static void __exit cleanup_netconsole(void) if (console_is_registered(&netconsole_ext)) unregister_console(&netconsole_ext); - unregister_console(&netconsole); + if (console_is_registered(&netconsole)) + unregister_console(&netconsole); dynamic_netconsole_exit(); unregister_netdevice_notifier(&netconsole_netdev_notifier); diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index fa5fbd97ad69..7f2809be5d48 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -93,19 +93,14 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL); rcu_read_unlock(); - u64_stats_update_begin(&ns->syncp); - ns->tx_packets++; - ns->tx_bytes += len; - u64_stats_update_end(&ns->syncp); + dev_dstats_tx_add(dev, skb->len); return NETDEV_TX_OK; out_drop_free: dev_kfree_skb(skb); out_drop_cnt: rcu_read_unlock(); - u64_stats_update_begin(&ns->syncp); - ns->tx_dropped++; - u64_stats_update_end(&ns->syncp); + dev_dstats_tx_dropped(dev); return NETDEV_TX_OK; } @@ -126,20 +121,6 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static void -nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - struct netdevsim *ns = netdev_priv(dev); - unsigned int start; - - do { - start = u64_stats_fetch_begin(&ns->syncp); - stats->tx_bytes = ns->tx_bytes; - stats->tx_packets = ns->tx_packets; - stats->tx_dropped = ns->tx_dropped; - } while (u64_stats_fetch_retry(&ns->syncp, start)); -} - static int nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { @@ -350,16 +331,24 @@ static int nsim_get_iflink(const struct net_device *dev) static int nsim_rcv(struct nsim_rq *rq, int budget) { + struct net_device *dev = rq->napi.dev; struct sk_buff *skb; - int i; + unsigned int skblen; + int i, ret; for (i = 0; i < budget; i++) { if (skb_queue_empty(&rq->skb_queue)) break; skb = skb_dequeue(&rq->skb_queue); + /* skb might be discard at netif_receive_skb, save the len */ + skblen = skb->len; skb_mark_napi_id(skb, &rq->napi); - netif_receive_skb(skb); + ret = netif_receive_skb(skb); + if (ret == NET_RX_SUCCESS) + dev_dstats_rx_add(dev, skblen); + else + dev_dstats_rx_dropped(dev); } return i; @@ -556,7 +545,6 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = nsim_change_mtu, - .ndo_get_stats64 = nsim_get_stats64, .ndo_set_vf_mac = nsim_set_vf_mac, .ndo_set_vf_vlan = nsim_set_vf_vlan, .ndo_set_vf_rate = nsim_set_vf_rate, @@ -580,7 +568,6 @@ static const struct net_device_ops nsim_vf_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = nsim_change_mtu, - .ndo_get_stats64 = nsim_get_stats64, .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, }; @@ -594,7 +581,7 @@ static void 
nsim_get_queue_stats_rx(struct net_device *dev, int idx, struct rtnl_link_stats64 rtstats = {}; if (!idx) - nsim_get_stats64(dev, &rtstats); + dev_get_stats(dev, &rtstats); stats->packets = rtstats.rx_packets - !!rtstats.rx_packets; stats->bytes = rtstats.rx_bytes; @@ -606,7 +593,7 @@ static void nsim_get_queue_stats_tx(struct net_device *dev, int idx, struct rtnl_link_stats64 rtstats = {}; if (!idx) - nsim_get_stats64(dev, &rtstats); + dev_get_stats(dev, &rtstats); stats->packets = rtstats.tx_packets - !!rtstats.tx_packets; stats->bytes = rtstats.tx_bytes; @@ -618,7 +605,7 @@ static void nsim_get_base_stats(struct net_device *dev, { struct rtnl_link_stats64 rtstats = {}; - nsim_get_stats64(dev, &rtstats); + dev_get_stats(dev, &rtstats); rx->packets = !!rtstats.rx_packets; rx->bytes = 0; @@ -645,9 +632,12 @@ static struct nsim_rq *nsim_queue_alloc(void) return rq; } -static void nsim_queue_free(struct nsim_rq *rq) +static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq) { hrtimer_cancel(&rq->napi_timer); + local_bh_disable(); + dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); + local_bh_enable(); skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); kfree(rq); } @@ -694,7 +684,7 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx) return 0; err_free: - nsim_queue_free(qmem->rq); + nsim_queue_free(dev, qmem->rq); return err; } @@ -708,7 +698,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem) if (!ns->rq_reset_mode) netif_napi_del_locked(&qmem->rq->napi); page_pool_destroy(qmem->rq->page_pool); - nsim_queue_free(qmem->rq); + nsim_queue_free(dev, qmem->rq); } } @@ -890,6 +880,7 @@ static void nsim_setup(struct net_device *dev) NETIF_F_HW_CSUM | NETIF_F_LRO | NETIF_F_TSO; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; dev->max_mtu = ETH_MAX_MTU; dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD; } @@ -925,7 +916,7 @@ static void nsim_queue_uninit(struct netdevsim *ns) int i; for (i = 0; i < dev->num_rx_queues; i++) - nsim_queue_free(ns->rq[i]); + nsim_queue_free(dev, ns->rq[i]); kfree(ns->rq); ns->rq = NULL; @@ -1022,7 +1013,6 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) dev_net_set(dev, nsim_dev_net(nsim_dev)); ns = netdev_priv(dev); ns->netdev = dev; - u64_stats_init(&ns->syncp); ns->nsim_dev = nsim_dev; ns->nsim_dev_port = nsim_dev_port; ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index d04401f0bdf7..4a0c48c7a384 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -108,11 +108,6 @@ struct netdevsim { int rq_reset_mode; - u64 tx_packets; - u64 tx_bytes; - u64 tx_dropped; - struct u64_stats_sync syncp; - struct nsim_bus_dev *nsim_bus_dev; struct bpf_prog *bpf_offloaded; @@ -131,7 +126,6 @@ struct netdevsim { struct nsim_macsec macsec; struct { u32 inject_error; - u32 sleep; u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS]; u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS]; struct dentry *ddir; @@ -342,7 +336,6 @@ struct nsim_dev { bool ipv4_only; bool shared; bool static_iana_vxlan; - u32 sleep; } udp_ports; struct nsim_dev_psample *psample; u16 esw_mode; diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c index 640b4983a9a0..89fff76e51cf 100644 --- a/drivers/net/netdevsim/udp_tunnels.c +++ b/drivers/net/netdevsim/udp_tunnels.c @@ -18,9 +18,6 @@ nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table, ret = 
-ns->udp_ports.inject_error; ns->udp_ports.inject_error = 0; - if (ns->udp_ports.sleep) - msleep(ns->udp_ports.sleep); - if (!ret) { if (ns->udp_ports.ports[table][entry]) { WARN(1, "entry already in use\n"); @@ -47,8 +44,6 @@ nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table, ret = -ns->udp_ports.inject_error; ns->udp_ports.inject_error = 0; - if (ns->udp_ports.sleep) - msleep(ns->udp_ports.sleep); if (!ret) { u32 val = be16_to_cpu(ti->port) << 16 | ti->type; @@ -112,12 +107,10 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data, struct net_device *dev = file->private_data; struct netdevsim *ns = netdev_priv(dev); - rtnl_lock(); if (dev->reg_state == NETREG_REGISTERED) { memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports)); udp_tunnel_nic_reset_ntf(dev); } - rtnl_unlock(); return count; } @@ -172,7 +165,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, GFP_KERNEL); if (!info) return -ENOMEM; - ns->udp_ports.sleep = nsim_dev->udp_ports.sleep; if (nsim_dev->udp_ports.sync_all) { info->set_port = NULL; @@ -181,8 +173,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, info->sync_table = NULL; } - if (ns->udp_ports.sleep) - info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP; if (nsim_dev->udp_ports.open_only) info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY; if (nsim_dev->udp_ports.ipv4_only) @@ -217,6 +207,4 @@ void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev) &nsim_dev->udp_ports.shared); debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir, &nsim_dev->udp_ports.static_iana_vxlan); - debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir, - &nsim_dev->udp_ports.sleep); } diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c index bff00946eae2..254cc94c4617 100644 --- a/drivers/net/ovpn/udp.c +++ b/drivers/net/ovpn/udp.c @@ -199,7 +199,7 @@ static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind, transmit: udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0, ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, - fl.fl4_dport, false, sk->sk_no_check_tx); + fl.fl4_dport, false, sk->sk_no_check_tx, 0); ret = 0; err: local_bh_enable(); @@ -274,7 +274,7 @@ transmit: skb->ignore_df = 1; udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0, ip6_dst_hoplimit(dst), 0, fl.fl6_sport, - fl.fl6_dport, udp_get_no_check6_tx(sk)); + fl.fl6_dport, udp_get_no_check6_tx(sk), 0); ret = 0; err: local_bh_enable(); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 53dad2482026..28acc6392cfc 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -25,6 +25,9 @@ if PHYLIB config SWPHY bool +config PHY_PACKAGE + tristate + config LED_TRIGGER_PHY bool "Support LED triggers for tracking link state" depends on LEDS_TRIGGERS @@ -157,6 +160,7 @@ config BCM54140_PHY tristate "Broadcom BCM54140 PHY" depends on HWMON || HWMON=n select BCM_NET_PHYLIB + select PHY_PACKAGE help Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY. @@ -292,6 +296,7 @@ source "drivers/net/phy/mediatek/Kconfig" config MICREL_PHY tristate "Micrel PHYs" depends on PTP_1588_CLOCK_OPTIONAL + select PHY_PACKAGE help Supports the KSZ9021, VSC8201, KS8001 PHYs. 
@@ -323,6 +328,7 @@ config MICROSEMI_PHY depends on MACSEC || MACSEC=n depends on PTP_1588_CLOCK_OPTIONAL || !NETWORK_PHY_TIMESTAMPING select CRYPTO_LIB_AES if MACSEC + select PHY_PACKAGE help Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 7827609e9032..b4795aaf9c1c 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,7 +3,7 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o phy_link_topology.o \ - phy_package.o phy_caps.o mdio_bus_provider.o + phy_caps.o mdio_bus_provider.o mdio-bus-y += mdio_bus.o mdio_device.o ifdef CONFIG_PHYLIB @@ -19,6 +19,7 @@ obj-$(CONFIG_MDIO_BUS) += mdio-bus.o obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_PHYLIB) += mdio_devres.o +obj-$(CONFIG_PHY_PACKAGE) += phy_package.o obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 01255dada600..33db21251f2e 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -516,7 +516,6 @@ static int dp83822_config_init_leds(struct phy_device *phydev) static int dp83822_config_init(struct phy_device *phydev) { struct dp83822_private *dp83822 = phydev->priv; - struct device *dev = &phydev->mdio.dev; int rgmii_delay = 0; s32 rx_int_delay; s32 tx_int_delay; @@ -549,15 +548,13 @@ static int dp83822_config_init(struct phy_device *phydev) return err; if (phy_interface_is_rgmii(phydev)) { - rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, - true); + rx_int_delay = phy_get_internal_delay(phydev, NULL, 0, true); /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */ if (rx_int_delay > 0) rgmii_delay |= DP83822_RX_CLK_SHIFT; - tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, - false); + tx_int_delay = phy_get_internal_delay(phydev, NULL, 0, false); /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */ if (tx_int_delay <= 0) diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index a62cd838a9ea..a2cd1cc35cde 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -540,9 +540,8 @@ static const int dp83869_internal_delay[] = {250, 500, 750, 1000, 1250, 1500, static int dp83869_of_init(struct phy_device *phydev) { + struct device_node *of_node = phydev->mdio.dev.of_node; struct dp83869_private *dp83869 = phydev->priv; - struct device *dev = &phydev->mdio.dev; - struct device_node *of_node = dev->of_node; int delay_size = ARRAY_SIZE(dp83869_internal_delay); int ret; @@ -597,13 +596,13 @@ static int dp83869_of_init(struct phy_device *phydev) &dp83869->tx_fifo_depth)) dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB; - dp83869->rx_int_delay = phy_get_internal_delay(phydev, dev, + dp83869->rx_int_delay = phy_get_internal_delay(phydev, &dp83869_internal_delay[0], delay_size, true); if (dp83869->rx_int_delay < 0) dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF; - dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev, + dp83869->tx_int_delay = phy_get_internal_delay(phydev, &dp83869_internal_delay[0], delay_size, false); if (dp83869->tx_int_delay < 0) diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c index 7e76323409c4..391c1d868808 100644 --- a/drivers/net/phy/dp83tg720.c +++ b/drivers/net/phy/dp83tg720.c @@ -13,21 +13,92 @@ #include "open_alliance_helpers.h" /* + * DP83TG720 PHY Limitations and Workarounds + * + * The DP83TG720 1000BASE-T1 PHY has several limitations that require + 
* software-side mitigations. These workarounds are implemented throughout + * this driver. This section documents the known issues and their corresponding + * mitigation strategies. + * + * 1. Unreliable Link Detection and Synchronized Reset Deadlock + * ------------------------------------------------------------ + * After a link loss or during link establishment, the DP83TG720 PHY may fail + * to detect or report link status correctly. As of June 2025, no public + * errata sheet for the DP83TG720 PHY documents this behavior. + * The "DP83TC81x, DP83TG72x Software Implementation Guide" application note + * (SNLA404, available at https://www.ti.com/lit/an/snla404/snla404.pdf) + * recommends performing a soft restart if polling for a link fails to establish + * a connection after 100ms. This procedure is adopted as the workaround for the + * observed link detection issue. + * + * However, in point-to-point setups where both link partners use the same + * driver (e.g. Linux on both sides), a synchronized reset pattern may emerge. + * This leads to a deadlock, where both PHYs reset at the same time and + * continuously miss each other during auto-negotiation. + * + * To address this, the reset procedure includes two components: + * + * - A **fixed minimum delay of 1ms** after a hardware reset. The datasheet + * "DP83TG720S-Q1 1000BASE-T1 Automotive Ethernet PHY with SGMII and RGMII" + * specifies this as the "Post reset stabilization-time prior to MDC preamble + * for register access" (T6.2), ensuring the PHY is ready for MDIO + * operations. + * + * - An **additional asymmetric delay**, empirically chosen based on + * master/slave role. This reduces the risk of synchronized resets on both + * link partners. Values are selected to avoid periodic overlap and ensure + * the link is re-established within a few cycles. + * + * The functions that implement this logic are: + * - dp83tg720_soft_reset() + * - dp83tg720_get_next_update_time() + * + * 2. Polling-Based Link Detection and IRQ Support + * ----------------------------------------------- + * Due to the PHY-specific limitation described in section 1, link-up events + * cannot be reliably detected via interrupts on the DP83TG720. Therefore, + * polling is required to detect transitions from link-down to link-up. + * + * While link-down events *can* be detected via IRQs on this PHY, this driver + * currently does **not** implement interrupt support. As a result, all link + * state changes must be detected using polling. + * + * Polling behavior: + * - When the link is up: slow polling (e.g. 1s). + * - When the link just went down: fast polling for a short time. + * - When the link stays down: fallback to slow polling. + * + * This design balances responsiveness and CPU usage. It sacrifices fast link-up + * times in cases where the link is expected to remain down for extended periods, + * assuming that such systems do not require immediate reactivity. + */ + +/* * DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link * is active. - * DP83TG720S_POLL_NO_LINK_MIN - Minimum polling interval in milliseconds when - * the link is down. - * DP83TG720S_POLL_NO_LINK_MAX - Maximum polling interval in milliseconds when - * the link is down. + * DP83TG720S_POLL_NO_LINK - Polling interval in milliseconds when the + * link is down. + * DP83TG720S_FAST_POLL_DURATION_MS - Timeout in milliseconds for no-link + * polling after which polling interval is + * increased. 
+ * DP83TG720S_POLL_SLOW - Slow polling interval when there is no + * link for a prolongued period. + * DP83TG720S_RESET_DELAY_MS_MASTER - Delay after a reset before attempting + * to establish a link again for master phy. + * DP83TG720S_RESET_DELAY_MS_SLAVE - Delay after a reset before attempting + * to establish a link again for slave phy. * * These values are not documented or officially recommended by the vendor but * were determined through empirical testing. They achieve a good balance in * minimizing the number of reset retries while ensuring reliable link recovery * within a reasonable timeframe. */ -#define DP83TG720S_POLL_ACTIVE_LINK 1000 -#define DP83TG720S_POLL_NO_LINK_MIN 100 -#define DP83TG720S_POLL_NO_LINK_MAX 1000 +#define DP83TG720S_POLL_ACTIVE_LINK 421 +#define DP83TG720S_POLL_NO_LINK 149 +#define DP83TG720S_FAST_POLL_DURATION_MS 6000 +#define DP83TG720S_POLL_SLOW 1117 +#define DP83TG720S_RESET_DELAY_MS_MASTER 97 +#define DP83TG720S_RESET_DELAY_MS_SLAVE 149 #define DP83TG720S_PHY_ID 0x2000a284 @@ -124,6 +195,7 @@ struct dp83tg720_stats { struct dp83tg720_priv { struct dp83tg720_stats stats; + unsigned long last_link_down_jiffies; }; /** @@ -201,6 +273,26 @@ static int dp83tg720_update_stats(struct phy_device *phydev) return 0; } +static int dp83tg720_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET); + if (ret) + return ret; + + /* Include mandatory MDC-access delay (1ms) + extra asymmetric delay to + * avoid synchronized reset deadlock. See section 1 in the top-of-file + * comment block. + */ + if (phydev->master_slave_state == MASTER_SLAVE_STATE_SLAVE) + msleep(DP83TG720S_RESET_DELAY_MS_SLAVE); + else + msleep(DP83TG720S_RESET_DELAY_MS_MASTER); + + return ret; +} + static void dp83tg720_get_link_stats(struct phy_device *phydev, struct ethtool_link_ext_stats *link_stats) { @@ -382,21 +474,11 @@ static int dp83tg720_read_status(struct phy_device *phydev) /* According to the "DP83TC81x, DP83TG72x Software * Implementation Guide", the PHY needs to be reset after a * link loss or if no link is created after at least 100ms. - * - * Currently we are polling with the PHY_STATE_TIME (1000ms) - * interval, which is still enough for not automotive use cases. */ ret = phy_init_hw(phydev); if (ret) return ret; - /* Sleep 600ms for PHY stabilization post-reset. - * Empirically chosen value (not documented). - * Helps reduce reset bounces with link partners having similar - * issues. - */ - msleep(600); - /* After HW reset we need to restore master/slave configuration. * genphy_c45_pma_baset1_read_master_slave() call will be done * by the dp83tg720_config_aneg() function. @@ -477,19 +559,11 @@ static int dp83tg720_config_init(struct phy_device *phydev) { int ret; - /* Software Restart is not enough to recover from a link failure. - * Using Hardware Reset instead. - */ - ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET); + /* Reset the PHY to recover from a link failure */ + ret = dp83tg720_soft_reset(phydev); if (ret) return ret; - /* Wait until MDC can be used again. - * The wait value of one 1ms is documented in "DP83TG720S-Q1 1000BASE-T1 - * Automotive Ethernet PHY with SGMII and RGMII" datasheet. 
- */ - usleep_range(1000, 2000); - if (phy_interface_is_rgmii(phydev)) { ret = dp83tg720_config_rgmii_delay(phydev); if (ret) @@ -525,50 +599,42 @@ static int dp83tg720_probe(struct phy_device *phydev) } /** - * dp83tg720_get_next_update_time - Determine the next update time for PHY - * state + * dp83tg720_get_next_update_time - Return next polling interval for PHY state * @phydev: Pointer to the phy_device structure * - * This function addresses a limitation of the DP83TG720 PHY, which cannot - * reliably detect or report a stable link state. To recover from such - * scenarios, the PHY must be periodically reset when the link is down. However, - * if the link partner also runs Linux with the same driver, synchronized reset - * intervals can lead to a deadlock where the link never establishes due to - * simultaneous resets on both sides. - * - * To avoid this, the function implements randomized polling intervals when the - * link is down. It ensures that reset intervals are desynchronized by - * introducing a random delay between a configured minimum and maximum range. - * When the link is up, a fixed polling interval is used to minimize overhead. - * - * This mechanism guarantees that the link will reestablish within 10 seconds - * in the worst-case scenario. + * Implements adaptive polling interval logic depending on link state and + * downtime duration. See the "2. Polling-Based Link Detection and IRQ Support" + * section at the top of this file for details. * - * Return: Time (in jiffies) until the next update event for the PHY state - * machine. + * Return: Time (in jiffies) until the next poll */ static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev) { + struct dp83tg720_priv *priv = phydev->priv; unsigned int next_time_jiffies; if (phydev->link) { - /* When the link is up, use a fixed 1000ms interval - * (in jiffies) - */ + priv->last_link_down_jiffies = 0; + + /* When the link is up, use a slower interval (in jiffies) */ next_time_jiffies = msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK); } else { - unsigned int min_jiffies, max_jiffies, rand_jiffies; - - /* When the link is down, randomize interval between min/max - * (in jiffies) - */ - min_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN); - max_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX); - - rand_jiffies = min_jiffies + - get_random_u32_below(max_jiffies - min_jiffies + 1); - next_time_jiffies = rand_jiffies; + unsigned long now = jiffies; + + if (!priv->last_link_down_jiffies) + priv->last_link_down_jiffies = now; + + if (time_before(now, priv->last_link_down_jiffies + + msecs_to_jiffies(DP83TG720S_FAST_POLL_DURATION_MS))) { + /* Link recently went down: fast polling */ + next_time_jiffies = + msecs_to_jiffies(DP83TG720S_POLL_NO_LINK); + } else { + /* Link has been down for a while: slow polling */ + next_time_jiffies = + msecs_to_jiffies(DP83TG720S_POLL_SLOW); + } } /* Ensure the polling time is at least one jiffy */ @@ -582,6 +648,7 @@ static struct phy_driver dp83tg720_driver[] = { .flags = PHY_POLL_CABLE_TEST, .probe = dp83tg720_probe, + .soft_reset = dp83tg720_soft_reset, .config_aneg = dp83tg720_config_aneg, .read_status = dp83tg720_read_status, .get_features = genphy_c45_pma_read_ext_abilities, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index a44771e8acdc..9766dd99afaa 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -174,7 +174,6 @@ static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500, static int 
xway_gphy_rgmii_init(struct phy_device *phydev) { - struct device *dev = &phydev->mdio.dev; unsigned int delay_size = ARRAY_SIZE(xway_internal_delay); s32 int_delay; int val = 0; @@ -207,8 +206,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev) if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - int_delay = phy_get_internal_delay(phydev, dev, - xway_internal_delay, + int_delay = phy_get_internal_delay(phydev, xway_internal_delay, delay_size, true); /* if rx-internal-delay-ps is missing, use default of 2.0 ns */ @@ -220,8 +218,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev) if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - int_delay = phy_get_internal_delay(phydev, dev, - xway_internal_delay, + int_delay = phy_get_internal_delay(phydev, xway_internal_delay, delay_size, false); /* if tx-internal-delay-ps is missing, use default of 2.0 ns */ diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 2de679a68115..d3184e8f12ec 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -3,17 +3,23 @@ * mdio-boardinfo - Collect pre-declarations for MDIO devices */ -#include <linux/kernel.h> -#include <linux/slab.h> #include <linux/export.h> -#include <linux/mutex.h> +#include <linux/kernel.h> #include <linux/list.h> +#include <linux/mutex.h> +#include <linux/phy.h> +#include <linux/slab.h> #include "mdio-boardinfo.h" static LIST_HEAD(mdio_board_list); static DEFINE_MUTEX(mdio_board_lock); +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + /** * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices * from pre-collected board specific MDIO information @@ -26,24 +32,18 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, (struct mii_bus *bus, struct mdio_board_info *bi)) { - struct mdio_board_entry *be; - struct mdio_board_entry *tmp; - struct mdio_board_info *bi; - int ret; + struct mdio_board_entry *be, *tmp; mutex_lock(&mdio_board_lock); list_for_each_entry_safe(be, tmp, &mdio_board_list, list) { - bi = &be->board_info; + struct mdio_board_info *bi = &be->board_info; if (strcmp(bus->id, bi->bus_id)) continue; mutex_unlock(&mdio_board_lock); - ret = cb(bus, bi); + cb(bus, bi); mutex_lock(&mdio_board_lock); - if (ret) - continue; - } mutex_unlock(&mdio_board_lock); } @@ -62,14 +62,13 @@ int mdiobus_register_board_info(const struct mdio_board_info *info, unsigned int n) { struct mdio_board_entry *be; - unsigned int i; be = kcalloc(n, sizeof(*be), GFP_KERNEL); if (!be) return -ENOMEM; - for (i = 0; i < n; i++, be++, info++) { - memcpy(&be->board_info, info, sizeof(*info)); + for (int i = 0; i < n; i++, be++) { + be->board_info = info[i]; mutex_lock(&mdio_board_lock); list_add_tail(&be->list, &mdio_board_list); mutex_unlock(&mdio_board_lock); diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h index 773bb51399be..0878b77878d4 100644 --- a/drivers/net/phy/mdio-boardinfo.h +++ b/drivers/net/phy/mdio-boardinfo.h @@ -7,13 +7,8 @@ #ifndef __MDIO_BOARD_INFO_H #define __MDIO_BOARD_INFO_H -#include <linux/phy.h> -#include <linux/mutex.h> - -struct mdio_board_entry { - struct list_head list; - struct mdio_board_info board_info; -}; +struct mii_bus; +struct mdio_board_info; void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, int (*cb) diff --git a/drivers/net/phy/mdio_bus_provider.c 
b/drivers/net/phy/mdio_bus_provider.c index 65850e36284d..48dc4bf85125 100644 --- a/drivers/net/phy/mdio_bus_provider.c +++ b/drivers/net/phy/mdio_bus_provider.c @@ -152,7 +152,6 @@ static int mdiobus_create_device(struct mii_bus *bus, strscpy(mdiodev->modalias, bi->modalias, sizeof(mdiodev->modalias)); - mdiodev->bus_match = mdio_device_bus_match; mdiodev->dev.platform_data = (void *)bi->platform_data; ret = mdio_device_register(mdiodev); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index cce3f405d1a4..f64176e0e197 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -35,7 +35,8 @@ static void mdio_device_release(struct device *dev) kfree(to_mdio_device(dev)); } -int mdio_device_bus_match(struct device *dev, const struct device_driver *drv) +static int mdio_device_bus_match(struct device *dev, + const struct device_driver *drv) { struct mdio_device *mdiodev = to_mdio_device(dev); const struct mdio_driver *mdiodrv = to_mdio_driver(drv); @@ -45,7 +46,6 @@ int mdio_device_bus_match(struct device *dev, const struct device_driver *drv) return strcmp(mdiodev->modalias, drv->name) == 0; } -EXPORT_SYMBOL_GPL(mdio_device_bus_match); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) { @@ -59,6 +59,7 @@ struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) mdiodev->dev.release = mdio_device_release; mdiodev->dev.parent = &bus->dev; mdiodev->dev.bus = &mdio_bus_type; + mdiodev->bus_match = mdio_device_bus_match; mdiodev->device_free = mdio_device_free; mdiodev->device_remove = mdio_device_remove; mdiodev->bus = bus; diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig index 9f30a91be8dd..bb7dc876271e 100644 --- a/drivers/net/phy/mediatek/Kconfig +++ b/drivers/net/phy/mediatek/Kconfig @@ -27,6 +27,7 @@ config MEDIATEK_GE_SOC_PHY depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \ COMPILE_TEST select MTK_NET_PHYLIB + select PHY_PACKAGE help Supports MediaTek SoC built-in Gigabit Ethernet PHYs. 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 64aa03aed770..d0429dc8f561 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -431,6 +431,10 @@ struct kszphy_ptp_priv { spinlock_t seconds_lock; }; +struct kszphy_phy_stats { + u64 rx_err_pkt_cnt; +}; + struct kszphy_priv { struct kszphy_ptp_priv ptp_priv; const struct kszphy_type *type; @@ -441,6 +445,7 @@ struct kszphy_priv { bool rmii_ref_clk_sel_val; bool clk_enable; u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; + struct kszphy_phy_stats phy_stats; }; static const struct kszphy_type lan8814_type = { @@ -1718,7 +1723,8 @@ static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat) * * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity */ - if (phydev_id_compare(phydev, PHY_ID_KSZ9131)) + if (phydev_id_compare(phydev, PHY_ID_KSZ9131) || + phydev_id_compare(phydev, PHY_ID_KSZ9477)) dt = clamp(dt - 22, 0, 255); return (dt * 400) / 10; @@ -1792,12 +1798,20 @@ static int ksz9x31_cable_test_get_status(struct phy_device *phydev, bool *finished) { struct kszphy_priv *priv = phydev->priv; - unsigned long pair_mask = 0xf; + unsigned long pair_mask; int retries = 20; int pair, ret, rv; *finished = false; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported)) + pair_mask = 0xf; /* All pairs */ + else + pair_mask = 0x3; /* Pairs A and B only */ + /* Try harder if link partner is active */ while (pair_mask && retries--) { for_each_set_bit(pair, &pair_mask, 4) { @@ -1948,6 +1962,56 @@ static int ksz886x_read_status(struct phy_device *phydev) return genphy_read_status(phydev); } +static int ksz9477_mdix_update(struct phy_device *phydev) +{ + if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) + phydev->mdix = phydev->mdix_ctrl; + else + phydev->mdix = ETH_TP_MDI_INVALID; + + return 0; +} + +static int ksz9477_read_mdix_ctrl(struct phy_device *phydev) +{ + int val; + + val = phy_read(phydev, MII_KSZ9131_AUTO_MDIX); + if (val < 0) + return val; + + if (!(val & MII_KSZ9131_AUTO_MDIX_SWAP_OFF)) + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + else if (val & MII_KSZ9131_AUTO_MDI_SET) + phydev->mdix_ctrl = ETH_TP_MDI; + else + phydev->mdix_ctrl = ETH_TP_MDI_X; + + return 0; +} + +static int ksz9477_read_status(struct phy_device *phydev) +{ + int ret; + + ret = ksz9477_mdix_update(phydev); + if (ret) + return ret; + + return genphy_read_status(phydev); +} + +static int ksz9477_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = ksz9131_config_mdix(phydev, phydev->mdix_ctrl); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + struct ksz9477_errata_write { u8 dev_addr; u8 reg_addr; @@ -2029,6 +2093,13 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } + /* Read initial MDI-X config state. So, we do not need to poll it + * later on. + */ + err = ksz9477_read_mdix_ctrl(phydev); + if (err) + return err; + return kszphy_config_init(phydev); } @@ -2073,6 +2144,35 @@ static void kszphy_get_stats(struct phy_device *phydev, data[i] = kszphy_get_stat(phydev, i); } +/* KSZ9477 PHY RXER Counter. Probably supported by other PHYs like KSZ9313, + * etc. The counter is incremented when the PHY receives a frame with one or + * more symbol errors. The counter is cleared when the register is read. 
+ */ +#define MII_KSZ9477_PHY_RXER_COUNTER 0x15 + +static int kszphy_update_stats(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + int ret; + + ret = phy_read(phydev, MII_KSZ9477_PHY_RXER_COUNTER); + if (ret < 0) + return ret; + + priv->phy_stats.rx_err_pkt_cnt += ret; + + return 0; +} + +static void kszphy_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *eth_stats, + struct ethtool_phy_stats *stats) +{ + struct kszphy_priv *priv = phydev->priv; + + stats->rx_errors = priv->phy_stats.rx_err_pkt_cnt; +} + static void kszphy_enable_clk(struct phy_device *phydev) { struct kszphy_priv *priv = phydev->priv; @@ -5688,12 +5788,19 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9477, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9477", + .probe = kszphy_probe, /* PHY_GBIT_FEATURES */ .config_init = ksz9477_config_init, .config_intr = kszphy_config_intr, + .config_aneg = ksz9477_config_aneg, + .read_status = ksz9477_read_status, .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, .resume = ksz9477_resume, + .get_phy_stats = kszphy_get_phy_stats, + .update_stats = kszphy_update_stats, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, } }; module_phy_driver(ksphy_driver); diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 7ff975efd8e7..7ed6522fb0ef 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -530,7 +530,6 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1; u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1; int delay_size = ARRAY_SIZE(vsc85xx_internal_delay); - struct device *dev = &phydev->mdio.dev; u16 reg_val = 0; u16 mask = 0; s32 rx_delay; @@ -549,7 +548,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, if (phy_interface_is_rgmii(phydev)) mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask; - rx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay, + rx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay, delay_size, true); if (rx_delay < 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || @@ -559,7 +558,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, rx_delay = RGMII_CLK_DELAY_0_2_NS; } - tx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay, + tx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay, delay_size, false); if (tx_delay < 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID || diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index bdd70d424491..61670be0f095 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -1573,10 +1573,3 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev, return ret; } EXPORT_SYMBOL(genphy_c45_ethtool_set_eee); - -struct phy_driver genphy_c45_driver = { - .phy_id = 0xffffffff, - .phy_id_mask = 0xffffffff, - .name = "Generic Clause 45 PHY", - .read_status = genphy_c45_read_status, -}; diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index e177037f9110..27f1833563ab 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -375,8 +375,8 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, devad | MII_MMD_CTRL_NOINCR); } -static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45, - int 
devad, u32 regnum) +int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45, + int devad, u32 regnum) { if (is_c45) return __mdiobus_c45_read(bus, phy_addr, devad, regnum); @@ -385,9 +385,10 @@ static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45, /* Read the content of the MMD's selected register */ return __mdiobus_read(bus, phy_addr, MII_MMD_DATA); } +EXPORT_SYMBOL_GPL(mmd_phy_read); -static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45, - int devad, u32 regnum, u16 val) +int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45, + int devad, u32 regnum, u16 val) { if (is_c45) return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val); @@ -396,6 +397,7 @@ static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45, /* Write the data into MMD's selected register */ return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val); } +EXPORT_SYMBOL_GPL(mmd_phy_write); /** * __phy_read_mmd - Convenience function for reading a register @@ -486,71 +488,6 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) EXPORT_SYMBOL(phy_write_mmd); /** - * __phy_package_read_mmd - read MMD reg relative to PHY package base addr - * @phydev: The phy_device struct - * @addr_offset: The offset to be added to PHY package base_addr - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Convenience helper for reading a register of an MMD on a given PHY - * using the PHY package base address. The base address is added to - * the addr_offset value. - * - * Same calling rules as for __phy_read(); - * - * NOTE: It's assumed that the entire PHY package is either C22 or C45. - */ -int __phy_package_read_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - if (regnum > (u16)~0 || devad > 32) - return -EINVAL; - - return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad, - regnum); -} -EXPORT_SYMBOL(__phy_package_read_mmd); - -/** - * __phy_package_write_mmd - write MMD reg relative to PHY package base addr - * @phydev: The phy_device struct - * @addr_offset: The offset to be added to PHY package base_addr - * @devad: The MMD to write to - * @regnum: The register on the MMD to write - * @val: value to write to @regnum - * - * Convenience helper for writing a register of an MMD on a given PHY - * using the PHY package base address. The base address is added to - * the addr_offset value. - * - * Same calling rules as for __phy_write(); - * - * NOTE: It's assumed that the entire PHY package is either C22 or C45. 
- */ -int __phy_package_write_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum, u16 val) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - if (regnum > (u16)~0 || devad > 32) - return -EINVAL; - - return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad, - regnum, val); -} -EXPORT_SYMBOL(__phy_package_write_mmd); - -/** * phy_modify_changed - Function for modifying a PHY register * @phydev: the phy_device struct * @regnum: register number to modify diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 73f9cb2e2844..90951681523c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -59,6 +59,13 @@ struct phy_fixup { int (*run)(struct phy_device *phydev); }; +static struct phy_driver genphy_c45_driver = { + .phy_id = 0xffffffff, + .phy_id_mask = 0xffffffff, + .name = "Generic Clause 45 PHY", + .read_status = genphy_c45_read_status, +}; + __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_basic_features); @@ -645,11 +652,119 @@ static struct attribute *phy_dev_attrs[] = { &dev_attr_phy_dev_flags.attr, NULL, }; -ATTRIBUTE_GROUPS(phy_dev); + +static const struct attribute_group phy_dev_group = { + .attrs = phy_dev_attrs, +}; + +#define MMD_DEVICE_ID_ATTR(n) \ +static ssize_t mmd##n##_device_id_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct phy_device *phydev = to_phy_device(dev); \ + return sysfs_emit(buf, "0x%.8lx\n", \ + (unsigned long)phydev->c45_ids.device_ids[n]); \ +} \ +static DEVICE_ATTR_RO(mmd##n##_device_id) + +MMD_DEVICE_ID_ATTR(1); +MMD_DEVICE_ID_ATTR(2); +MMD_DEVICE_ID_ATTR(3); +MMD_DEVICE_ID_ATTR(4); +MMD_DEVICE_ID_ATTR(5); +MMD_DEVICE_ID_ATTR(6); +MMD_DEVICE_ID_ATTR(7); +MMD_DEVICE_ID_ATTR(8); +MMD_DEVICE_ID_ATTR(9); +MMD_DEVICE_ID_ATTR(10); +MMD_DEVICE_ID_ATTR(11); +MMD_DEVICE_ID_ATTR(12); +MMD_DEVICE_ID_ATTR(13); +MMD_DEVICE_ID_ATTR(14); +MMD_DEVICE_ID_ATTR(15); +MMD_DEVICE_ID_ATTR(16); +MMD_DEVICE_ID_ATTR(17); +MMD_DEVICE_ID_ATTR(18); +MMD_DEVICE_ID_ATTR(19); +MMD_DEVICE_ID_ATTR(20); +MMD_DEVICE_ID_ATTR(21); +MMD_DEVICE_ID_ATTR(22); +MMD_DEVICE_ID_ATTR(23); +MMD_DEVICE_ID_ATTR(24); +MMD_DEVICE_ID_ATTR(25); +MMD_DEVICE_ID_ATTR(26); +MMD_DEVICE_ID_ATTR(27); +MMD_DEVICE_ID_ATTR(28); +MMD_DEVICE_ID_ATTR(29); +MMD_DEVICE_ID_ATTR(30); +MMD_DEVICE_ID_ATTR(31); + +static struct attribute *phy_mmd_attrs[] = { + &dev_attr_mmd1_device_id.attr, + &dev_attr_mmd2_device_id.attr, + &dev_attr_mmd3_device_id.attr, + &dev_attr_mmd4_device_id.attr, + &dev_attr_mmd5_device_id.attr, + &dev_attr_mmd6_device_id.attr, + &dev_attr_mmd7_device_id.attr, + &dev_attr_mmd8_device_id.attr, + &dev_attr_mmd9_device_id.attr, + &dev_attr_mmd10_device_id.attr, + &dev_attr_mmd11_device_id.attr, + &dev_attr_mmd12_device_id.attr, + &dev_attr_mmd13_device_id.attr, + &dev_attr_mmd14_device_id.attr, + &dev_attr_mmd15_device_id.attr, + &dev_attr_mmd16_device_id.attr, + &dev_attr_mmd17_device_id.attr, + &dev_attr_mmd18_device_id.attr, + &dev_attr_mmd19_device_id.attr, + &dev_attr_mmd20_device_id.attr, + &dev_attr_mmd21_device_id.attr, + &dev_attr_mmd22_device_id.attr, + &dev_attr_mmd23_device_id.attr, + &dev_attr_mmd24_device_id.attr, + &dev_attr_mmd25_device_id.attr, + &dev_attr_mmd26_device_id.attr, + &dev_attr_mmd27_device_id.attr, + &dev_attr_mmd28_device_id.attr, + &dev_attr_mmd29_device_id.attr, + &dev_attr_mmd30_device_id.attr, + &dev_attr_mmd31_device_id.attr, + NULL +}; + +static umode_t 
phy_mmd_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct phy_device *phydev = to_phy_device(dev); + const int i = index + 1; + + if (!phydev->is_c45) + return 0; + if (i >= ARRAY_SIZE(phydev->c45_ids.device_ids) || + phydev->c45_ids.device_ids[i] == 0xffffffff) + return 0; + + return attr->mode; +} + +static const struct attribute_group phy_mmd_group = { + .name = "c45_phy_ids", + .attrs = phy_mmd_attrs, + .is_visible = phy_mmd_is_visible, +}; + +static const struct attribute_group *phy_device_groups[] = { + &phy_dev_group, + &phy_mmd_group, + NULL, +}; static const struct device_type mdio_bus_phy_type = { .name = "PHY", - .groups = phy_dev_groups, + .groups = phy_device_groups, .release = phy_device_release, .pm = pm_ptr(&mdio_bus_phy_pm_ops), }; @@ -1515,7 +1630,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, struct mii_bus *bus = phydev->mdio.bus; struct device *d = &phydev->mdio.dev; struct module *ndev_owner = NULL; - bool using_genphy = false; int err; /* For Ethernet device drivers that register their own MDIO bus, we @@ -1541,7 +1655,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, else d->driver = &genphy_driver.mdiodrv.driver; - using_genphy = true; + phydev->is_genphy_driven = 1; } if (!try_module_get(d->driver->owner)) { @@ -1550,7 +1664,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, goto error_put_device; } - if (using_genphy) { + if (phydev->is_genphy_driven) { err = d->driver->probe(d); if (err >= 0) err = device_bind_driver(d); @@ -1620,7 +1734,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, * the generic PHY driver we can't figure it out, thus set the old * legacy PORT_MII value. 
*/ - if (using_genphy) + if (phydev->is_genphy_driven) phydev->port = PORT_MII; /* Initial carrier state is off as the phy is about to be @@ -1659,6 +1773,7 @@ error: error_module_put: module_put(d->driver->owner); + phydev->is_genphy_driven = 0; d->driver = NULL; error_put_device: put_device(d); @@ -1706,36 +1821,6 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, } EXPORT_SYMBOL(phy_attach); -static bool phy_driver_is_genphy_kind(struct phy_device *phydev, - struct device_driver *driver) -{ - struct device *d = &phydev->mdio.dev; - bool ret = false; - - if (!phydev->drv) - return ret; - - get_device(d); - ret = d->driver == driver; - put_device(d); - - return ret; -} - -bool phy_driver_is_genphy(struct phy_device *phydev) -{ - return phy_driver_is_genphy_kind(phydev, - &genphy_driver.mdiodrv.driver); -} -EXPORT_SYMBOL_GPL(phy_driver_is_genphy); - -bool phy_driver_is_genphy_10g(struct phy_device *phydev) -{ - return phy_driver_is_genphy_kind(phydev, - &genphy_c45_driver.mdiodrv.driver); -} -EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g); - /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct @@ -1792,9 +1877,10 @@ void phy_detach(struct phy_device *phydev) * from the generic driver so that there's a chance a * real driver could be loaded */ - if (phy_driver_is_genphy(phydev) || - phy_driver_is_genphy_10g(phydev)) + if (phydev->is_genphy_driven) { device_release_driver(&phydev->mdio.dev); + phydev->is_genphy_driven = 0; + } /* Assert the reset signal */ phy_device_reset(phydev, 1); @@ -2899,7 +2985,6 @@ static int phy_get_u32_property(struct device *dev, const char *name, u32 *val) /** * phy_get_internal_delay - returns the index of the internal delay * @phydev: phy_device struct - * @dev: pointer to the devices device struct * @delay_values: array of delays the PHY supports * @size: the size of the delay array * @is_rx: boolean to indicate to get the rx internal delay @@ -2912,9 +2997,10 @@ static int phy_get_u32_property(struct device *dev, const char *name, u32 *val) * array then size = 0 and the value of the delay property is returned. * Return -EINVAL if the delay is invalid or cannot be found. 
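+ *
+ * Hypothetical usage with the new signature (the foo_delays table and
+ * its 0/2000 ps entries are made up for illustration):
+ *
+ *   static const int foo_delays[] = { 0, 2000 };
+ *   s32 idx = phy_get_internal_delay(phydev, foo_delays,
+ *                                    ARRAY_SIZE(foo_delays), true);
+ *   if (idx >= 0)
+ *       rx_delay_ps = foo_delays[idx];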
*/ -s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, - const int *delay_values, int size, bool is_rx) +s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values, + int size, bool is_rx) { + struct device *dev = &phydev->mdio.dev; int i, ret; u32 delay; diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c index c738f76e8664..3024da0bbf7b 100644 --- a/drivers/net/phy/phy_package.c +++ b/drivers/net/phy/phy_package.c @@ -52,7 +52,8 @@ void *phy_package_get_priv(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(phy_package_get_priv); -int phy_package_address(struct phy_device *phydev, unsigned int addr_offset) +static int phy_package_address(struct phy_device *phydev, + unsigned int addr_offset) { struct phy_package_shared *shared = phydev->shared; u8 base_addr = shared->base_addr; @@ -90,6 +91,71 @@ int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset, } EXPORT_SYMBOL_GPL(__phy_package_write); +/** + * __phy_package_read_mmd - read MMD reg relative to PHY package base addr + * @phydev: The phy_device struct + * @addr_offset: The offset to be added to PHY package base_addr + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Convenience helper for reading a register of an MMD on a given PHY + * using the PHY package base address. The base address is added to + * the addr_offset value. + * + * Same calling rules as for __phy_read(); + * + * NOTE: It's assumed that the entire PHY package is either C22 or C45. + */ +int __phy_package_read_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum) +{ + int addr = phy_package_address(phydev, addr_offset); + + if (addr < 0) + return addr; + + if (regnum > (u16)~0 || devad > 32) + return -EINVAL; + + return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad, + regnum); +} +EXPORT_SYMBOL(__phy_package_read_mmd); + +/** + * __phy_package_write_mmd - write MMD reg relative to PHY package base addr + * @phydev: The phy_device struct + * @addr_offset: The offset to be added to PHY package base_addr + * @devad: The MMD to write to + * @regnum: The register on the MMD to write + * @val: value to write to @regnum + * + * Convenience helper for writing a register of an MMD on a given PHY + * using the PHY package base address. The base address is added to + * the addr_offset value. + * + * Same calling rules as for __phy_write(); + * + * NOTE: It's assumed that the entire PHY package is either C22 or C45. 
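+ *
+ * Hypothetical caller (the address offset and register number are made
+ * up for illustration). As with __phy_read(), the MDIO bus lock must
+ * be held:
+ *
+ *   phy_lock_mdio_bus(phydev);
+ *   ret = __phy_package_read_mmd(phydev, 1, MDIO_MMD_VEND1, 0x10);
+ *   phy_unlock_mdio_bus(phydev);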
+ */ +int __phy_package_write_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum, u16 val) +{ + int addr = phy_package_address(phydev, addr_offset); + + if (addr < 0) + return addr; + + if (regnum > (u16)~0 || devad > 32) + return -EINVAL; + + return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad, + regnum, val); +} +EXPORT_SYMBOL(__phy_package_write_mmd); + static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b) { struct phy_package_shared *shared = phydev->shared; @@ -348,3 +414,6 @@ int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, return ret; } EXPORT_SYMBOL_GPL(devm_of_phy_package_join); + +MODULE_DESCRIPTION("PHY package support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h index afac2bd15b50..ebda74eb60a5 100644 --- a/drivers/net/phy/phylib-internal.h +++ b/drivers/net/phy/phylib-internal.h @@ -7,6 +7,7 @@ #define __PHYLIB_INTERNAL_H struct phy_device; +struct mii_bus; /* * phy_supported_speeds - return all speeds currently supported by a PHY device @@ -20,7 +21,10 @@ void of_set_phy_timing_role(struct phy_device *phydev); int phy_speed_down_core(struct phy_device *phydev); void phy_check_downshift(struct phy_device *phydev); -int phy_package_address(struct phy_device *phydev, unsigned int addr_offset); +int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45, + int devad, u32 regnum); +int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45, + int devad, u32 regnum, u16 val); int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv); diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig index 570626cc8e14..bba14be8da2f 100644 --- a/drivers/net/phy/qcom/Kconfig +++ b/drivers/net/phy/qcom/Kconfig @@ -24,6 +24,7 @@ config QCA808X_PHY config QCA807X_PHY tristate "Qualcomm QCA807x PHYs" select QCOM_NET_PHYLIB + select PHY_PACKAGE depends on OF_MDIO help Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index 1af6b5ead74b..6d10ef7e9a8a 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -377,7 +377,7 @@ static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset) return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val); } -static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +static int qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { struct qca807x_gpio_priv *priv = gpiochip_get_data(gc); u16 reg; @@ -386,18 +386,19 @@ static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int valu reg = QCA807X_MMD7_LED_FORCE_CTRL(offset); val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg); + if (val < 0) + return val; + val &= ~QCA807X_GPIO_FORCE_MODE_MASK; val |= QCA807X_GPIO_FORCE_EN; val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value); - phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val); + return phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val); } static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value) { - qca807x_gpio_set(gc, offset, value); - - return 0; + return qca807x_gpio_set(gc, offset, value); } static int qca807x_gpio(struct phy_device *phydev) @@ -425,7 +426,7 @@ static int qca807x_gpio(struct phy_device *phydev) gc->get_direction = qca807x_gpio_get_direction; gc->direction_output = qca807x_gpio_dir_out; gc->get = qca807x_gpio_get; - gc->set = qca807x_gpio_set; 
+ gc->set_rv = qca807x_gpio_set; return devm_gpiochip_add_data(dev, gc, priv); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 347c1e0e94d9..5347c95d1e77 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -361,6 +361,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) sfp->state_ignore_mask |= SFP_F_TX_FAULT; } +static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask) +{ + sfp->state_hw_mask &= ~mask; +} + static void sfp_fixup_nokia(struct sfp *sfp) { sfp_fixup_long_startup(sfp); @@ -409,7 +414,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp) * these are possibly used for other purposes on this * module, e.g. a serial port. */ - sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); +} + +static void sfp_fixup_potron(struct sfp *sfp) +{ + /* + * The TX_FAULT and LOS pins on this device are used for serial + * communication, so ignore them. Additionally, provide extra + * time for this device to fully start up. + */ + + sfp_fixup_long_startup(sfp); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); } static void sfp_fixup_rollball_cc(struct sfp *sfp) @@ -512,6 +529,8 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron), + // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault), diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index def84e87e05b..4cf9d1822a83 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -108,18 +108,6 @@ struct ppp_file { #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) /* - * Data structure to hold primary network stats for which - * we want to use 64 bit storage. Other network stats - * are stored in dev->stats of the ppp strucute. - */ -struct ppp_link_stats { - u64 rx_packets; - u64 tx_packets; - u64 rx_bytes; - u64 tx_bytes; -}; - -/* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. 
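The ppp_generic.c hunks below drop the driver-private 64-bit counters in favour of the core's per-CPU software netstats. A minimal sketch of that pattern, assuming a hypothetical driver (all foo_* names are invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_setup(struct net_device *dev)
{
	/* Ask the core to allocate per-CPU rx/tx counters (dev->tstats) */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Lockless per-CPU accounting on the hot path */
	dev_sw_netstats_tx_add(dev, 1, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	/* Sums every CPU's counters into stats */
	dev_fetch_sw_netstats(stats, dev->tstats);
}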
@@ -162,7 +150,6 @@ struct ppp { struct bpf_prog *active_filter; /* filter for pkts to reset idle */ #endif /* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */ - struct ppp_link_stats stats64; /* 64 bit network stats */ }; /* @@ -1539,23 +1526,12 @@ ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr, static void ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { - struct ppp *ppp = netdev_priv(dev); - - ppp_recv_lock(ppp); - stats64->rx_packets = ppp->stats64.rx_packets; - stats64->rx_bytes = ppp->stats64.rx_bytes; - ppp_recv_unlock(ppp); - - ppp_xmit_lock(ppp); - stats64->tx_packets = ppp->stats64.tx_packets; - stats64->tx_bytes = ppp->stats64.tx_bytes; - ppp_xmit_unlock(ppp); - stats64->rx_errors = dev->stats.rx_errors; stats64->tx_errors = dev->stats.tx_errors; stats64->rx_dropped = dev->stats.rx_dropped; stats64->tx_dropped = dev->stats.tx_dropped; stats64->rx_length_errors = dev->stats.rx_length_errors; + dev_fetch_sw_netstats(stats64, dev->tstats); } static int ppp_dev_init(struct net_device *dev) @@ -1650,6 +1626,7 @@ static void ppp_setup(struct net_device *dev) dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->priv_destructor = ppp_dev_priv_destructor; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; netif_keep_dst(dev); } @@ -1796,8 +1773,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) #endif /* CONFIG_PPP_FILTER */ } - ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; + dev_sw_netstats_tx_add(ppp->dev, 1, skb->len - PPP_PROTO_LEN); switch (proto) { case PPP_IP: @@ -2474,8 +2450,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) break; } - ++ppp->stats64.rx_packets; - ppp->stats64.rx_bytes += skb->len - 2; + dev_sw_netstats_rx_add(ppp->dev, skb->len - PPP_PROTO_LEN); npi = proto_to_npindex(proto); if (npi < 0) { @@ -3303,14 +3278,25 @@ static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) { struct slcompress *vj = ppp->vj; + int cpu; memset(st, 0, sizeof(*st)); - st->p.ppp_ipackets = ppp->stats64.rx_packets; + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *p = per_cpu_ptr(ppp->dev->tstats, cpu); + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + + rx_packets = u64_stats_read(&p->rx_packets); + rx_bytes = u64_stats_read(&p->rx_bytes); + tx_packets = u64_stats_read(&p->tx_packets); + tx_bytes = u64_stats_read(&p->tx_bytes); + + st->p.ppp_ipackets += rx_packets; + st->p.ppp_ibytes += rx_bytes; + st->p.ppp_opackets += tx_packets; + st->p.ppp_obytes += tx_bytes; + } st->p.ppp_ierrors = ppp->dev->stats.rx_errors; - st->p.ppp_ibytes = ppp->stats64.rx_bytes; - st->p.ppp_opackets = ppp->stats64.tx_packets; st->p.ppp_oerrors = ppp->dev->stats.tx_errors; - st->p.ppp_obytes = ppp->stats64.tx_bytes; if (!vj) return; st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c index 7d60a714ca53..4de004813560 100644 --- a/drivers/net/pse-pd/pd692x0.c +++ b/drivers/net/pse-pd/pd692x0.c @@ -12,6 +12,8 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pse-pd/pse.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> #define PD692X0_PSE_NAME "pd692x0_pse" @@ -76,6 +78,8 @@ enum { PD692X0_MSG_GET_PORT_CLASS, PD692X0_MSG_GET_PORT_MEAS, PD692X0_MSG_GET_PORT_PARAM, + PD692X0_MSG_GET_POWER_BANK, + PD692X0_MSG_SET_POWER_BANK, /* add new message above here */ PD692X0_MSG_CNT @@ -95,6 +99,8 @@ struct 
pd692x0_priv { unsigned long last_cmd_key_time; enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS]; + struct regulator_dev *manager_reg[PD692X0_MAX_MANAGERS]; + int manager_pw_budget[PD692X0_MAX_MANAGERS]; }; /* Template list of communication messages. The non-null bytes defined here @@ -170,6 +176,16 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = { .data = {0x4e, 0x4e, 0x4e, 0x4e, 0x4e, 0x4e, 0x4e, 0x4e}, }, + [PD692X0_MSG_GET_POWER_BANK] = { + .key = PD692X0_KEY_REQ, + .sub = {0x07, 0x0b, 0x57}, + .data = { 0, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_SET_POWER_BANK] = { + .key = PD692X0_KEY_CMD, + .sub = {0x07, 0x0b, 0x57}, + }, }; static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo) @@ -739,6 +755,29 @@ pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id) return (buf.data[0] << 4 | buf.data[1]) * 100; } +static int +pd692x0_pi_get_prio(struct pse_controller_dev *pcdev, int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + if (!buf.data[2] || buf.data[2] > pcdev->pis_prio_max + 1) + return -ERANGE; + + /* PSE core priority start at 0 */ + return buf.data[2] - 1; +} + static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv) { struct device *dev = &priv->client->dev; @@ -766,6 +805,7 @@ static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv) struct pd692x0_manager { struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS]; + struct device_node *node; int nports; }; @@ -857,6 +897,8 @@ pd692x0_of_get_managers(struct pd692x0_priv *priv, if (ret) goto out; + of_node_get(node); + manager[manager_id].node = node; nmanagers++; } @@ -869,6 +911,8 @@ out: of_node_put(manager[i].port_node[j]); manager[i].port_node[j] = NULL; } + of_node_put(manager[i].node); + manager[i].node = NULL; } of_node_put(node); @@ -876,6 +920,143 @@ out: return ret; } +static const struct regulator_ops dummy_ops; + +static struct regulator_dev * +pd692x0_register_manager_regulator(struct device *dev, char *reg_name, + struct device_node *node) +{ + struct regulator_init_data *rinit_data; + struct regulator_config rconfig = {0}; + struct regulator_desc *rdesc; + struct regulator_dev *rdev; + + rinit_data = devm_kzalloc(dev, sizeof(*rinit_data), + GFP_KERNEL); + if (!rinit_data) + return ERR_PTR(-ENOMEM); + + rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL); + if (!rdesc) + return ERR_PTR(-ENOMEM); + + rdesc->name = reg_name; + rdesc->type = REGULATOR_VOLTAGE; + rdesc->ops = &dummy_ops; + rdesc->owner = THIS_MODULE; + + rinit_data->supply_regulator = "vmain"; + + rconfig.dev = dev; + rconfig.init_data = rinit_data; + rconfig.of_node = node; + + rdev = devm_regulator_register(dev, rdesc, &rconfig); + if (IS_ERR(rdev)) { + dev_err_probe(dev, PTR_ERR(rdev), + "Failed to register regulator\n"); + return rdev; + } + + return rdev; +} + +static int +pd692x0_register_managers_regulator(struct pd692x0_priv *priv, + const struct pd692x0_manager *manager, + int nmanagers) +{ + struct device *dev = &priv->client->dev; + size_t reg_name_len; + int i; + + /* Each regulator name len is dev name + 12 char + + * int max digit number (10) + 1 + */ + reg_name_len = strlen(dev_name(dev)) + 23; + + for (i = 0; i < 
nmanagers; i++) { + static const char * const regulators[] = { "vaux5", "vaux3p3" }; + struct regulator_dev *rdev; + char *reg_name; + int ret; + + reg_name = devm_kzalloc(dev, reg_name_len, GFP_KERNEL); + if (!reg_name) + return -ENOMEM; + snprintf(reg_name, 26, "pse-%s-manager%d", dev_name(dev), i); + rdev = pd692x0_register_manager_regulator(dev, reg_name, + manager[i].node); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + + /* VMAIN is described as main supply for the manager. + * Add other VAUX power supplies and link them to the + * virtual device rdev->dev. + */ + ret = devm_regulator_bulk_get_enable(&rdev->dev, + ARRAY_SIZE(regulators), + regulators); + if (ret) + return dev_err_probe(&rdev->dev, ret, + "Failed to enable regulators\n"); + + priv->manager_reg[i] = rdev; + } + + return 0; +} + +static int +pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id, int pw) +{ + struct pd692x0_msg msg, buf; + int ret, pw_mW = pw / 1000; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_POWER_BANK]; + msg.data[0] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_POWER_BANK]; + msg.data[0] = id; + msg.data[1] = pw_mW >> 8; + msg.data[2] = pw_mW & 0xff; + msg.data[3] = buf.sub[2]; + msg.data[4] = buf.data[0]; + msg.data[5] = buf.data[1]; + msg.data[6] = buf.data[2]; + msg.data[7] = buf.data[3]; + return pd692x0_sendrecv_msg(priv, &msg, &buf); +} + +static int +pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers) +{ + int i, ret; + + for (i = 0; i < nmanagers; i++) { + struct regulator *supply = priv->manager_reg[i]->supply; + int pw_budget; + + pw_budget = regulator_get_unclaimed_power_budget(supply); + /* Max power budget per manager */ + if (pw_budget > 6000000) + pw_budget = 6000000; + ret = regulator_request_power_budget(supply, pw_budget); + if (ret < 0) + return ret; + + priv->manager_pw_budget[i] = pw_budget; + ret = pd692x0_conf_manager_power_budget(priv, i, pw_budget); + if (ret < 0) + return ret; + } + + return 0; +} + static int pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset, const struct pd692x0_manager *manager, @@ -998,6 +1179,14 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) return ret; nmanagers = ret; + ret = pd692x0_register_managers_regulator(priv, manager, nmanagers); + if (ret) + goto out; + + ret = pd692x0_configure_managers(priv, nmanagers); + if (ret) + goto out; + ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix); if (ret) goto out; @@ -1008,8 +1197,14 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) out: for (i = 0; i < nmanagers; i++) { + struct regulator *supply = priv->manager_reg[i]->supply; + + regulator_free_power_budget(supply, + priv->manager_pw_budget[i]); + for (j = 0; j < manager[i].nports; j++) of_node_put(manager[i].port_node[j]); + of_node_put(manager[i].node); } return ret; } @@ -1071,6 +1266,25 @@ static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev, return pd692x0_sendrecv_msg(priv, &msg, &buf); } +static int pd692x0_pi_set_prio(struct pse_controller_dev *pcdev, int id, + unsigned int prio) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM]; + msg.sub[2] = id; + /* Controller priority from 1 to 3 */ + msg.data[4] = prio + 1; + + return pd692x0_sendrecv_msg(priv, 
&msg, &buf); +} + static const struct pse_controller_ops pd692x0_ops = { .setup_pi_matrix = pd692x0_setup_pi_matrix, .pi_get_admin_state = pd692x0_pi_get_admin_state, @@ -1084,6 +1298,8 @@ static const struct pse_controller_ops pd692x0_ops = { .pi_get_pw_limit = pd692x0_pi_get_pw_limit, .pi_set_pw_limit = pd692x0_pi_set_pw_limit, .pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges, + .pi_get_prio = pd692x0_pi_get_prio, + .pi_set_prio = pd692x0_pi_set_prio, }; #define PD692X0_FW_LINE_MAX_SZ 0xff @@ -1437,6 +1653,7 @@ static const struct fw_upload_ops pd692x0_fw_ops = { static int pd692x0_i2c_probe(struct i2c_client *client) { + static const char * const regulators[] = { "vdd", "vdda" }; struct pd692x0_msg msg, buf = {0}, zero = {0}; struct device *dev = &client->dev; struct pd692x0_msg_ver ver; @@ -1444,6 +1661,12 @@ static int pd692x0_i2c_probe(struct i2c_client *client) struct fw_upload *fwl; int ret; + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators), + regulators); + if (ret) + return dev_err_probe(dev, ret, + "Failed to enable regulators\n"); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "i2c check functionality failed\n"); return -ENXIO; @@ -1500,6 +1723,8 @@ static int pd692x0_i2c_probe(struct i2c_client *client) priv->pcdev.ops = &pd692x0_ops; priv->pcdev.dev = dev; priv->pcdev.types = ETHTOOL_PSE_C33; + priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_DYNAMIC; + priv->pcdev.pis_prio_max = 2; ret = devm_pse_controller_register(dev, &priv->pcdev); if (ret) return dev_err_probe(dev, ret, diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 4602e26eb8c8..23eb3c9d0bcd 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -7,13 +7,21 @@ #include <linux/device.h> #include <linux/ethtool.h> +#include <linux/ethtool_netlink.h> #include <linux/of.h> +#include <linux/phy.h> #include <linux/pse-pd/pse.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/rtnetlink.h> +#include <net/net_trackers.h> + +#define PSE_PW_D_LIMIT INT_MAX static DEFINE_MUTEX(pse_list_mutex); static LIST_HEAD(pse_controller_list); +static DEFINE_XARRAY_ALLOC(pse_pw_d_map); +static DEFINE_MUTEX(pse_pw_d_mutex); /** * struct pse_control - a PSE control @@ -23,6 +31,7 @@ static LIST_HEAD(pse_controller_list); * @list: list entry for the pcdev's PSE controller list * @id: ID of the PSE line in the PSE controller device * @refcnt: Number of gets of this pse_control + * @attached_phydev: PHY device pointer attached by the PSE control */ struct pse_control { struct pse_controller_dev *pcdev; @@ -30,6 +39,22 @@ struct pse_control { struct list_head list; unsigned int id; struct kref refcnt; + struct phy_device *attached_phydev; +}; + +/** + * struct pse_power_domain - a PSE power domain + * @id: ID of the power domain + * @supply: Power supply the Power Domain + * @refcnt: Number of gets of this pse_power_domain + * @budget_eval_strategy: Current power budget evaluation strategy of the + * power domain + */ +struct pse_power_domain { + int id; + struct regulator *supply; + struct kref refcnt; + u32 budget_eval_strategy; }; static int of_load_single_pse_pi_pairset(struct device_node *node, @@ -208,10 +233,185 @@ out: return ret; } +/** + * pse_control_find_net_by_id - Find net attached to the pse control id + * @pcdev: a pointer to the PSE + * @id: index of the PSE control + * + * Return: pse_control pointer or NULL. 
The device returned has had a + * reference added and the pointer is safe until the user calls + * pse_control_put() to indicate they have finished with it. + */ +static struct pse_control * +pse_control_find_by_id(struct pse_controller_dev *pcdev, int id) +{ + struct pse_control *psec; + + mutex_lock(&pse_list_mutex); + list_for_each_entry(psec, &pcdev->pse_control_head, list) { + if (psec->id == id) { + kref_get(&psec->refcnt); + mutex_unlock(&pse_list_mutex); + return psec; + } + } + mutex_unlock(&pse_list_mutex); + return NULL; +} + +/** + * pse_control_get_netdev - Return netdev associated to a PSE control + * @psec: PSE control pointer + * + * Return: netdev pointer or NULL + */ +static struct net_device *pse_control_get_netdev(struct pse_control *psec) +{ + ASSERT_RTNL(); + + if (!psec || !psec->attached_phydev) + return NULL; + + return psec->attached_phydev->attached_dev; +} + +/** + * pse_pi_is_hw_enabled - Is PI enabled at the hardware level + * @pcdev: a pointer to the PSE controller device + * @id: Index of the PI + * + * Return: 1 if the PI is enabled at the hardware level, 0 if not, and + * a failure value on error + */ +static int pse_pi_is_hw_enabled(struct pse_controller_dev *pcdev, int id) +{ + struct pse_admin_state admin_state = {0}; + int ret; + + ret = pcdev->ops->pi_get_admin_state(pcdev, id, &admin_state); + if (ret < 0) + return ret; + + /* PI is well enabled at the hardware level */ + if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED || + admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED) + return 1; + + return 0; +} + +/** + * pse_pi_is_admin_enable_pending - Check if PI is in admin enable pending state + * which mean the power is not yet being + * delivered + * @pcdev: a pointer to the PSE controller device + * @id: Index of the PI + * + * Detects if a PI is enabled in software with a PD detected, but the hardware + * admin state hasn't been applied yet. + * + * This function is used in the power delivery and retry mechanisms to determine + * which PIs need to have power delivery attempted again. + * + * Return: true if the PI has admin enable flag set in software but not yet + * reflected in the hardware admin state, false otherwise. + */ +static bool +pse_pi_is_admin_enable_pending(struct pse_controller_dev *pcdev, int id) +{ + int ret; + + /* PI not enabled or nothing is plugged */ + if (!pcdev->pi[id].admin_state_enabled || + !pcdev->pi[id].isr_pd_detected) + return false; + + ret = pse_pi_is_hw_enabled(pcdev, id); + /* PSE PI is already enabled at hardware level */ + if (ret == 1) + return false; + + return true; +} + +static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev, + int id, + struct netlink_ext_ack *extack); + +/** + * pse_pw_d_retry_power_delivery - Retry power delivery for pending ports in a + * PSE power domain + * @pcdev: a pointer to the PSE controller device + * @pw_d: a pointer to the PSE power domain + * + * Scans all ports in the specified power domain and attempts to enable power + * delivery to any ports that have admin enable state set but don't yet have + * hardware power enabled. Used when there are changes in connection status, + * admin state, or priority that might allow previously unpowered ports to + * receive power, especially in over-budget conditions. 
+ */ +static void pse_pw_d_retry_power_delivery(struct pse_controller_dev *pcdev, + struct pse_power_domain *pw_d) +{ + int i, ret = 0; + + for (i = 0; i < pcdev->nr_lines; i++) { + int prio_max = pcdev->nr_lines; + struct netlink_ext_ack extack; + + if (pcdev->pi[i].pw_d != pw_d) + continue; + + if (!pse_pi_is_admin_enable_pending(pcdev, i)) + continue; + + /* Do not try to enable PI with a lower prio (higher value) + * than one which already can't be enabled. + */ + if (pcdev->pi[i].prio > prio_max) + continue; + + ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, i, &extack); + if (ret == -ERANGE) + prio_max = pcdev->pi[i].prio; + } +} + +/** + * pse_pw_d_is_sw_pw_control - Determine if power control is software managed + * @pcdev: a pointer to the PSE controller device + * @pw_d: a pointer to the PSE power domain + * + * This function determines whether the power control for a specific power + * domain is managed by software in the interrupt handler rather than directly + * by hardware. + * + * Software power control is active in the following cases: + * - When the budget evaluation strategy is set to static + * - When the budget evaluation strategy is disabled but the PSE controller + * has an interrupt handler that can report if a Powered Device is connected + * + * Return: true if the power control of the power domain is managed by software, + * false otherwise + */ +static bool pse_pw_d_is_sw_pw_control(struct pse_controller_dev *pcdev, + struct pse_power_domain *pw_d) +{ + if (!pw_d) + return false; + + if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC) + return true; + if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_DISABLED && + pcdev->ops->pi_enable && pcdev->irq) + return true; + + return false; +} + static int pse_pi_is_enabled(struct regulator_dev *rdev) { struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); - struct pse_admin_state admin_state = {0}; const struct pse_controller_ops *ops; int id, ret; @@ -221,13 +421,12 @@ static int pse_pi_is_enabled(struct regulator_dev *rdev) id = rdev_get_id(rdev); mutex_lock(&pcdev->lock); - ret = ops->pi_get_admin_state(pcdev, id, &admin_state); - if (ret) + if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) { + ret = pcdev->pi[id].admin_state_enabled; goto out; + } - if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED || - admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED) - ret = 1; + ret = pse_pi_is_hw_enabled(pcdev, id); out: mutex_unlock(&pcdev->lock); @@ -235,11 +434,239 @@ out: return ret; } +/** + * pse_pi_deallocate_pw_budget - Deallocate power budget of the PI + * @pi: a pointer to the PSE PI + */ +static void pse_pi_deallocate_pw_budget(struct pse_pi *pi) +{ + if (!pi->pw_d || !pi->pw_allocated_mW) + return; + + regulator_free_power_budget(pi->pw_d->supply, pi->pw_allocated_mW); + pi->pw_allocated_mW = 0; +} + +/** + * _pse_pi_disable - Call disable operation. Assumes the PSE lock has been + * acquired. 
+ * @pcdev: a pointer to the PSE + * @id: index of the PSE control + * + * Return: 0 on success and failure value on error + */ +static int _pse_pi_disable(struct pse_controller_dev *pcdev, int id) +{ + const struct pse_controller_ops *ops = pcdev->ops; + int ret; + + if (!ops->pi_disable) + return -EOPNOTSUPP; + + ret = ops->pi_disable(pcdev, id); + if (ret) + return ret; + + pse_pi_deallocate_pw_budget(&pcdev->pi[id]); + + if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) + pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[id].pw_d); + + return 0; +} + +/** + * pse_disable_pi_pol - Disable a PI on a power budget policy + * @pcdev: a pointer to the PSE + * @id: index of the PSE PI + * + * Return: 0 on success and failure value on error + */ +static int pse_disable_pi_pol(struct pse_controller_dev *pcdev, int id) +{ + unsigned long notifs = ETHTOOL_PSE_EVENT_OVER_BUDGET; + struct pse_ntf ntf = {}; + int ret; + + dev_dbg(pcdev->dev, "Disabling PI %d to free power budget\n", id); + + ret = _pse_pi_disable(pcdev, id); + if (ret) + notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR; + + ntf.notifs = notifs; + ntf.id = id; + kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1, &pcdev->ntf_fifo_lock); + schedule_work(&pcdev->ntf_work); + + return ret; +} + +/** + * pse_disable_pi_prio - Disable all PIs of a given priority inside a PSE + * power domain + * @pcdev: a pointer to the PSE + * @pw_d: a pointer to the PSE power domain + * @prio: priority + * + * Return: 0 on success and failure value on error + */ +static int pse_disable_pi_prio(struct pse_controller_dev *pcdev, + struct pse_power_domain *pw_d, + int prio) +{ + int i; + + for (i = 0; i < pcdev->nr_lines; i++) { + int ret; + + if (pcdev->pi[i].prio != prio || + pcdev->pi[i].pw_d != pw_d || + pse_pi_is_hw_enabled(pcdev, i) <= 0) + continue; + + ret = pse_disable_pi_pol(pcdev, i); + if (ret) + return ret; + } + + return 0; +} + +/** + * pse_pi_allocate_pw_budget_static_prio - Allocate power budget for the PI + * when the budget eval strategy is + * static + * @pcdev: a pointer to the PSE + * @id: index of the PSE control + * @pw_req: power requested in mW + * @extack: extack for error reporting + * + * Allocates power using static budget evaluation strategy, where allocation + * is based on PD classification. When insufficient budget is available, + * lower-priority ports (higher priority numbers) are turned off first. 
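+ *
+ * Worked example (numbers are illustrative): with 10 W left in the
+ * domain and a 30 W request from a prio-1 PI,
+ * regulator_request_power_budget() keeps returning -ERANGE, so PIs at
+ * prio nr_lines, nr_lines - 1, ... are shed until the request fits.
+ * If _prio reaches the requester's own prio before that happens,
+ * -ERANGE is returned to the caller instead.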
+ * + * Return: 0 on success and failure value on error + */ +static int +pse_pi_allocate_pw_budget_static_prio(struct pse_controller_dev *pcdev, int id, + int pw_req, struct netlink_ext_ack *extack) +{ + struct pse_pi *pi = &pcdev->pi[id]; + int ret, _prio; + + _prio = pcdev->nr_lines; + while (regulator_request_power_budget(pi->pw_d->supply, pw_req) == -ERANGE) { + if (_prio <= pi->prio) { + NL_SET_ERR_MSG_FMT(extack, + "PI %d: not enough power budget available", + id); + return -ERANGE; + } + + ret = pse_disable_pi_prio(pcdev, pi->pw_d, _prio); + if (ret < 0) + return ret; + + _prio--; + } + + pi->pw_allocated_mW = pw_req; + return 0; +} + +/** + * pse_pi_allocate_pw_budget - Allocate power budget for the PI + * @pcdev: a pointer to the PSE + * @id: index of the PSE control + * @pw_req: power requested in mW + * @extack: extack for error reporting + * + * Return: 0 on success and failure value on error + */ +static int pse_pi_allocate_pw_budget(struct pse_controller_dev *pcdev, int id, + int pw_req, struct netlink_ext_ack *extack) +{ + struct pse_pi *pi = &pcdev->pi[id]; + + if (!pi->pw_d) + return 0; + + /* PSE_BUDGET_EVAL_STRAT_STATIC */ + if (pi->pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC) + return pse_pi_allocate_pw_budget_static_prio(pcdev, id, pw_req, + extack); + + return 0; +} + +/** + * _pse_pi_delivery_power_sw_pw_ctrl - Enable PSE PI in case of software power + * control. Assumes the PSE lock has been + * acquired. + * @pcdev: a pointer to the PSE + * @id: index of the PSE control + * @extack: extack for error reporting + * + * Return: 0 on success and failure value on error + */ +static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev, + int id, + struct netlink_ext_ack *extack) +{ + const struct pse_controller_ops *ops = pcdev->ops; + struct pse_pi *pi = &pcdev->pi[id]; + int ret, pw_req; + + if (!ops->pi_get_pw_req) { + /* No power allocation management */ + ret = ops->pi_enable(pcdev, id); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "PI %d: enable error %d", + id, ret); + return ret; + } + + ret = ops->pi_get_pw_req(pcdev, id); + if (ret < 0) + return ret; + + pw_req = ret; + + /* Compare requested power with port power limit and use the lowest + * one. + */ + if (ops->pi_get_pw_limit) { + ret = ops->pi_get_pw_limit(pcdev, id); + if (ret < 0) + return ret; + + if (ret < pw_req) + pw_req = ret; + } + + ret = pse_pi_allocate_pw_budget(pcdev, id, pw_req, extack); + if (ret) + return ret; + + ret = ops->pi_enable(pcdev, id); + if (ret) { + pse_pi_deallocate_pw_budget(pi); + NL_SET_ERR_MSG_FMT(extack, + "PI %d: enable error %d", + id, ret); + return ret; + } + + return 0; +} + static int pse_pi_enable(struct regulator_dev *rdev) { struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); const struct pse_controller_ops *ops; - int id, ret; + int id, ret = 0; ops = pcdev->ops; if (!ops->pi_enable) @@ -247,6 +674,23 @@ static int pse_pi_enable(struct regulator_dev *rdev) id = rdev_get_id(rdev); mutex_lock(&pcdev->lock); + if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) { + /* Manage enabled status by software. + * Real enable process will happen if a port is connected. 
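+ * (The hardware pi_enable() then happens from pse_set_config_isr()
+ * once a CLASSIFICATION event has set isr_pd_detected.)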
+ */ + if (pcdev->pi[id].isr_pd_detected) { + struct netlink_ext_ack extack; + + ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id, &extack); + } + if (!ret || ret == -ERANGE) { + pcdev->pi[id].admin_state_enabled = 1; + ret = 0; + } + mutex_unlock(&pcdev->lock); + return ret; + } + ret = ops->pi_enable(pcdev, id); if (!ret) pcdev->pi[id].admin_state_enabled = 1; @@ -258,21 +702,18 @@ static int pse_pi_enable(struct regulator_dev *rdev) static int pse_pi_disable(struct regulator_dev *rdev) { struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); - const struct pse_controller_ops *ops; + struct pse_pi *pi; int id, ret; - ops = pcdev->ops; - if (!ops->pi_disable) - return -EOPNOTSUPP; - id = rdev_get_id(rdev); + pi = &pcdev->pi[id]; mutex_lock(&pcdev->lock); - ret = ops->pi_disable(pcdev, id); + ret = _pse_pi_disable(pcdev, id); if (!ret) - pcdev->pi[id].admin_state_enabled = 0; - mutex_unlock(&pcdev->lock); + pi->admin_state_enabled = 0; - return ret; + mutex_unlock(&pcdev->lock); + return 0; } static int _pse_pi_get_voltage(struct regulator_dev *rdev) @@ -437,6 +878,158 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev, return 0; } +static void __pse_pw_d_release(struct kref *kref) +{ + struct pse_power_domain *pw_d = container_of(kref, + struct pse_power_domain, + refcnt); + + regulator_put(pw_d->supply); + xa_erase(&pse_pw_d_map, pw_d->id); + mutex_unlock(&pse_pw_d_mutex); +} + +/** + * pse_flush_pw_ds - flush all PSE power domains of a PSE + * @pcdev: a pointer to the initialized PSE controller device + */ +static void pse_flush_pw_ds(struct pse_controller_dev *pcdev) +{ + struct pse_power_domain *pw_d; + int i; + + for (i = 0; i < pcdev->nr_lines; i++) { + if (!pcdev->pi[i].pw_d) + continue; + + pw_d = xa_load(&pse_pw_d_map, pcdev->pi[i].pw_d->id); + if (!pw_d) + continue; + + kref_put_mutex(&pw_d->refcnt, __pse_pw_d_release, + &pse_pw_d_mutex); + } +} + +/** + * devm_pse_alloc_pw_d - allocate a new PSE power domain for a device + * @dev: device that is registering this PSE power domain + * + * Return: Pointer to the newly allocated PSE power domain or error pointers + */ +static struct pse_power_domain *devm_pse_alloc_pw_d(struct device *dev) +{ + struct pse_power_domain *pw_d; + int index, ret; + + pw_d = devm_kzalloc(dev, sizeof(*pw_d), GFP_KERNEL); + if (!pw_d) + return ERR_PTR(-ENOMEM); + + ret = xa_alloc(&pse_pw_d_map, &index, pw_d, XA_LIMIT(1, PSE_PW_D_LIMIT), + GFP_KERNEL); + if (ret) + return ERR_PTR(ret); + + kref_init(&pw_d->refcnt); + pw_d->id = index; + return pw_d; +} + +/** + * pse_register_pw_ds - register the PSE power domains for a PSE + * @pcdev: a pointer to the PSE controller device + * + * Return: 0 on success and failure value on error + */ +static int pse_register_pw_ds(struct pse_controller_dev *pcdev) +{ + int i, ret = 0; + + mutex_lock(&pse_pw_d_mutex); + for (i = 0; i < pcdev->nr_lines; i++) { + struct regulator_dev *rdev = pcdev->pi[i].rdev; + struct pse_power_domain *pw_d; + struct regulator *supply; + bool present = false; + unsigned long index; + + /* No regulator or regulator parent supply registered. + * We need a regulator parent to register a PSE power domain + */ + if (!rdev || !rdev->supply) + continue; + + xa_for_each(&pse_pw_d_map, index, pw_d) { + /* Power supply already registered as a PSE power + * domain. 
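+ * Take another reference on it instead of
+ * allocating a new one.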
+ */ + if (regulator_is_equal(pw_d->supply, rdev->supply)) { + present = true; + pcdev->pi[i].pw_d = pw_d; + break; + } + } + if (present) { + kref_get(&pw_d->refcnt); + continue; + } + + pw_d = devm_pse_alloc_pw_d(pcdev->dev); + if (IS_ERR(pw_d)) { + ret = PTR_ERR(pw_d); + goto out; + } + + supply = regulator_get(&rdev->dev, rdev->supply_name); + if (IS_ERR(supply)) { + xa_erase(&pse_pw_d_map, pw_d->id); + ret = PTR_ERR(supply); + goto out; + } + + pw_d->supply = supply; + if (pcdev->supp_budget_eval_strategies) + pw_d->budget_eval_strategy = pcdev->supp_budget_eval_strategies; + else + pw_d->budget_eval_strategy = PSE_BUDGET_EVAL_STRAT_DISABLED; + kref_init(&pw_d->refcnt); + pcdev->pi[i].pw_d = pw_d; + } + +out: + mutex_unlock(&pse_pw_d_mutex); + return ret; +} + +/** + * pse_send_ntf_worker - Worker to send PSE notifications + * @work: work object + * + * Manage and send PSE netlink notifications using a workqueue to avoid + * deadlock between pcdev_lock and pse_list_mutex. + */ +static void pse_send_ntf_worker(struct work_struct *work) +{ + struct pse_controller_dev *pcdev; + struct pse_ntf ntf; + + pcdev = container_of(work, struct pse_controller_dev, ntf_work); + + while (kfifo_out(&pcdev->ntf_fifo, &ntf, 1)) { + struct net_device *netdev; + struct pse_control *psec; + + psec = pse_control_find_by_id(pcdev, ntf.id); + rtnl_lock(); + netdev = pse_control_get_netdev(psec); + if (netdev) + ethnl_pse_send_ntf(netdev, ntf.notifs); + rtnl_unlock(); + pse_control_put(psec); + } +} + /** * pse_controller_register - register a PSE controller device * @pcdev: a pointer to the initialized PSE controller device @@ -450,6 +1043,13 @@ int pse_controller_register(struct pse_controller_dev *pcdev) mutex_init(&pcdev->lock); INIT_LIST_HEAD(&pcdev->pse_control_head); + spin_lock_init(&pcdev->ntf_fifo_lock); + ret = kfifo_alloc(&pcdev->ntf_fifo, pcdev->nr_lines, GFP_KERNEL); + if (ret) { + dev_err(pcdev->dev, "failed to allocate kfifo notifications\n"); + return ret; + } + INIT_WORK(&pcdev->ntf_work, pse_send_ntf_worker); if (!pcdev->nr_lines) pcdev->nr_lines = 1; @@ -496,6 +1096,10 @@ int pse_controller_register(struct pse_controller_dev *pcdev) return ret; } + ret = pse_register_pw_ds(pcdev); + if (ret) + return ret; + mutex_lock(&pse_list_mutex); list_add(&pcdev->list, &pse_controller_list); mutex_unlock(&pse_list_mutex); @@ -510,7 +1114,12 @@ EXPORT_SYMBOL_GPL(pse_controller_register); */ void pse_controller_unregister(struct pse_controller_dev *pcdev) { + pse_flush_pw_ds(pcdev); pse_release_pis(pcdev); + if (pcdev->irq) + disable_irq(pcdev->irq); + cancel_work_sync(&pcdev->ntf_work); + kfifo_free(&pcdev->ntf_fifo); mutex_lock(&pse_list_mutex); list_del(&pcdev->list); mutex_unlock(&pse_list_mutex); @@ -557,6 +1166,191 @@ int devm_pse_controller_register(struct device *dev, } EXPORT_SYMBOL_GPL(devm_pse_controller_register); +struct pse_irq { + struct pse_controller_dev *pcdev; + struct pse_irq_desc desc; + unsigned long *notifs; +}; + +/** + * pse_to_regulator_notifs - Convert PSE notifications to Regulator + * notifications + * @notifs: PSE notifications + * + * Return: Regulator notifications + */ +static unsigned long pse_to_regulator_notifs(unsigned long notifs) +{ + unsigned long rnotifs = 0; + + if (notifs & ETHTOOL_PSE_EVENT_OVER_CURRENT) + rnotifs |= REGULATOR_EVENT_OVER_CURRENT; + if (notifs & ETHTOOL_PSE_EVENT_OVER_TEMP) + rnotifs |= REGULATOR_EVENT_OVER_TEMP; + + return rnotifs; +} + +/** + * pse_set_config_isr - Set PSE control config according to the PSE + * notifications + * @pcdev: a 
pointer to the PSE + * @id: index of the PSE control + * @notifs: PSE event notifications + * + * Return: 0 on success and failure value on error + */ +static int pse_set_config_isr(struct pse_controller_dev *pcdev, int id, + unsigned long notifs) +{ + int ret = 0; + + if (notifs & PSE_BUDGET_EVAL_STRAT_DYNAMIC) + return 0; + + if ((notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) && + ((notifs & ETHTOOL_C33_PSE_EVENT_DETECTION) || + (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION))) { + dev_dbg(pcdev->dev, + "PI %d: error, connection and disconnection reported simultaneously", + id); + return -EINVAL; + } + + if (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION) { + struct netlink_ext_ack extack; + + pcdev->pi[id].isr_pd_detected = true; + if (pcdev->pi[id].admin_state_enabled) { + ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id, + &extack); + if (ret == -ERANGE) + ret = 0; + } + } else if (notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) { + if (pcdev->pi[id].admin_state_enabled && + pcdev->pi[id].isr_pd_detected) + ret = _pse_pi_disable(pcdev, id); + pcdev->pi[id].isr_pd_detected = false; + } + + return ret; +} + +/** + * pse_isr - IRQ handler for PSE + * @irq: irq number + * @data: pointer to user interrupt structure + * + * Return: irqreturn_t - status of IRQ + */ +static irqreturn_t pse_isr(int irq, void *data) +{ + struct pse_controller_dev *pcdev; + unsigned long notifs_mask = 0; + struct pse_irq_desc *desc; + struct pse_irq *h = data; + int ret, i; + + desc = &h->desc; + pcdev = h->pcdev; + + /* Clear notifs mask */ + memset(h->notifs, 0, pcdev->nr_lines * sizeof(*h->notifs)); + mutex_lock(&pcdev->lock); + ret = desc->map_event(irq, pcdev, h->notifs, ¬ifs_mask); + if (ret || !notifs_mask) { + mutex_unlock(&pcdev->lock); + return IRQ_NONE; + } + + for_each_set_bit(i, ¬ifs_mask, pcdev->nr_lines) { + unsigned long notifs, rnotifs; + struct pse_ntf ntf = {}; + + /* Do nothing PI not described */ + if (!pcdev->pi[i].rdev) + continue; + + notifs = h->notifs[i]; + if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[i].pw_d)) { + ret = pse_set_config_isr(pcdev, i, notifs); + if (ret) + notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR; + } + + dev_dbg(h->pcdev->dev, + "Sending PSE notification EVT 0x%lx\n", notifs); + + ntf.notifs = notifs; + ntf.id = i; + kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1, + &pcdev->ntf_fifo_lock); + schedule_work(&pcdev->ntf_work); + + rnotifs = pse_to_regulator_notifs(notifs); + regulator_notifier_call_chain(pcdev->pi[i].rdev, rnotifs, + NULL); + } + + mutex_unlock(&pcdev->lock); + + return IRQ_HANDLED; +} + +/** + * devm_pse_irq_helper - Register IRQ based PSE event notifier + * @pcdev: a pointer to the PSE + * @irq: the irq value to be passed to request_irq + * @irq_flags: the flags to be passed to request_irq + * @d: PSE interrupt description + * + * Return: 0 on success and errno on failure + */ +int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq, + int irq_flags, const struct pse_irq_desc *d) +{ + struct device *dev = pcdev->dev; + size_t irq_name_len; + struct pse_irq *h; + char *irq_name; + int ret; + + if (!d || !d->map_event || !d->name) + return -EINVAL; + + h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL); + if (!h) + return -ENOMEM; + + h->pcdev = pcdev; + h->desc = *d; + + /* IRQ name len is pcdev dev name + 5 char + irq desc name + 1 */ + irq_name_len = strlen(dev_name(pcdev->dev)) + 5 + strlen(d->name) + 1; + irq_name = devm_kzalloc(dev, irq_name_len, GFP_KERNEL); + if (!irq_name) + return -ENOMEM; + + snprintf(irq_name, irq_name_len, 
"pse-%s:%s", dev_name(pcdev->dev), + d->name); + + h->notifs = devm_kcalloc(dev, pcdev->nr_lines, + sizeof(*h->notifs), GFP_KERNEL); + if (!h->notifs) + return -ENOMEM; + + ret = devm_request_threaded_irq(dev, irq, NULL, pse_isr, + IRQF_ONESHOT | irq_flags, + irq_name, h); + if (ret) + dev_err(pcdev->dev, "Failed to request IRQ %d\n", irq); + + pcdev->irq = irq; + return ret; +} +EXPORT_SYMBOL_GPL(devm_pse_irq_helper); + /* PSE control section */ static void __pse_control_release(struct kref *kref) @@ -599,7 +1393,8 @@ void pse_control_put(struct pse_control *psec) EXPORT_SYMBOL_GPL(pse_control_put); static struct pse_control * -pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) +pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index, + struct phy_device *phydev) { struct pse_control *psec; int ret; @@ -622,6 +1417,20 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) goto free_psec; } + if (!pcdev->ops->pi_get_admin_state) { + ret = -EOPNOTSUPP; + goto free_psec; + } + + /* Initialize admin_state_enabled before the regulator_get. This + * aims to have the right value reported in the first is_enabled + * call in case of control managed by software. + */ + ret = pse_pi_is_hw_enabled(pcdev, index); + if (ret < 0) + goto free_psec; + + pcdev->pi[index].admin_state_enabled = ret; psec->ps = devm_regulator_get_exclusive(pcdev->dev, rdev_get_name(pcdev->pi[index].rdev)); if (IS_ERR(psec->ps)) { @@ -629,21 +1438,14 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) goto put_module; } - ret = regulator_is_enabled(psec->ps); - if (ret < 0) - goto regulator_put; - - pcdev->pi[index].admin_state_enabled = ret; - psec->pcdev = pcdev; list_add(&psec->list, &pcdev->pse_control_head); psec->id = index; + psec->attached_phydev = phydev; kref_init(&psec->refcnt); return psec; -regulator_put: - devm_regulator_put(psec->ps); put_module: module_put(pcdev->owner); free_psec: @@ -693,7 +1495,8 @@ static int psec_id_xlate(struct pse_controller_dev *pcdev, return pse_spec->args[0]; } -struct pse_control *of_pse_control_get(struct device_node *node) +struct pse_control *of_pse_control_get(struct device_node *node, + struct phy_device *phydev) { struct pse_controller_dev *r, *pcdev; struct of_phandle_args args; @@ -743,7 +1546,7 @@ struct pse_control *of_pse_control_get(struct device_node *node) } /* pse_list_mutex also protects the pcdev's pse_control list */ - psec = pse_control_get_internal(pcdev, psec_id); + psec = pse_control_get_internal(pcdev, psec_id, phydev); out: mutex_unlock(&pse_list_mutex); @@ -754,6 +1557,35 @@ out: EXPORT_SYMBOL_GPL(of_pse_control_get); /** + * pse_get_sw_admin_state - Convert the software admin state to c33 or podl + * admin state value used in the standard + * @psec: PSE control pointer + * @admin_state: a pointer to the admin_state structure + */ +static void pse_get_sw_admin_state(struct pse_control *psec, + struct pse_admin_state *admin_state) +{ + struct pse_pi *pi = &psec->pcdev->pi[psec->id]; + + if (pse_has_podl(psec)) { + if (pi->admin_state_enabled) + admin_state->podl_admin_state = + ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED; + else + admin_state->podl_admin_state = + ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED; + } + if (pse_has_c33(psec)) { + if (pi->admin_state_enabled) + admin_state->c33_admin_state = + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED; + else + admin_state->c33_admin_state = + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED; + } +} + +/** * pse_ethtool_get_status - get 
status of PSE control * @psec: PSE control pointer * @extack: extack for reporting useful error messages @@ -769,16 +1601,46 @@ int pse_ethtool_get_status(struct pse_control *psec, struct pse_pw_status pw_status = {0}; const struct pse_controller_ops *ops; struct pse_controller_dev *pcdev; + struct pse_pi *pi; int ret; pcdev = psec->pcdev; ops = pcdev->ops; + + pi = &pcdev->pi[psec->id]; mutex_lock(&pcdev->lock); - ret = ops->pi_get_admin_state(pcdev, psec->id, &admin_state); - if (ret) - goto out; - status->podl_admin_state = admin_state.podl_admin_state; - status->c33_admin_state = admin_state.c33_admin_state; + if (pi->pw_d) { + status->pw_d_id = pi->pw_d->id; + if (pse_pw_d_is_sw_pw_control(pcdev, pi->pw_d)) { + pse_get_sw_admin_state(psec, &admin_state); + } else { + ret = ops->pi_get_admin_state(pcdev, psec->id, + &admin_state); + if (ret) + goto out; + } + status->podl_admin_state = admin_state.podl_admin_state; + status->c33_admin_state = admin_state.c33_admin_state; + + switch (pi->pw_d->budget_eval_strategy) { + case PSE_BUDGET_EVAL_STRAT_STATIC: + status->prio_max = pcdev->nr_lines - 1; + status->prio = pi->prio; + break; + case PSE_BUDGET_EVAL_STRAT_DYNAMIC: + status->prio_max = pcdev->pis_prio_max; + if (ops->pi_get_prio) { + ret = ops->pi_get_prio(pcdev, psec->id); + if (ret < 0) + goto out; + + status->prio = ret; + } + break; + default: + break; + } + } ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status); if (ret) @@ -928,6 +1790,52 @@ int pse_ethtool_set_config(struct pse_control *psec, EXPORT_SYMBOL_GPL(pse_ethtool_set_config); /** + * pse_pi_update_pw_budget - Update PSE power budget allocated with new + * power in mW + * @pcdev: a pointer to the PSE controller device + * @id: index of the PSE PI + * @pw_req: power requested + * @extack: extack for reporting useful error messages + * + * Return: Previous power allocated on success and failure value on error + */ +static int pse_pi_update_pw_budget(struct pse_controller_dev *pcdev, int id, + const unsigned int pw_req, + struct netlink_ext_ack *extack) +{ + struct pse_pi *pi = &pcdev->pi[id]; + int previous_pw_allocated; + int pw_diff, ret = 0; + + /* We don't want the pw_allocated_mW value to change in the middle of + * a power budget update + */ + mutex_lock(&pcdev->lock); + previous_pw_allocated = pi->pw_allocated_mW; + pw_diff = pw_req - previous_pw_allocated; + if (!pw_diff) { + goto out; + } else if (pw_diff > 0) { + ret = regulator_request_power_budget(pi->pw_d->supply, pw_diff); + if (ret) { + NL_SET_ERR_MSG_FMT(extack, + "PI %d: not enough power budget available", + id); + goto out; + } + + } else { + regulator_free_power_budget(pi->pw_d->supply, -pw_diff); + } + pi->pw_allocated_mW = pw_req; + ret = previous_pw_allocated; + +out: + mutex_unlock(&pcdev->lock); + return ret; +} + +/** * pse_ethtool_set_pw_limit - set PSE control power limit * @psec: PSE control pointer * @extack: extack for reporting useful error messages @@ -939,7 +1847,7 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec, struct netlink_ext_ack *extack, const unsigned int pw_limit) { - int uV, uA, ret, previous_pw_allocated = 0; s64 tmp_64; if (pw_limit > MAX_PI_PW) @@ -963,10 +1871,100 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec, /* uA = mW * 1000000000 / uV */ uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); - return regulator_set_current_limit(psec->ps, 0, uA); + /* Update power budget only in software power control case and + * if a Power Device is powered. 
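+ * The budget is reserved via pse_pi_update_pw_budget() before
+ * regulator_set_current_limit() is called, so that an over-subscribed
+ * budget is caught early; if the regulator call then fails, the
+ * previous allocation is restored below.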
+ */ + if (pse_pw_d_is_sw_pw_control(psec->pcdev, + psec->pcdev->pi[psec->id].pw_d) && + psec->pcdev->pi[psec->id].admin_state_enabled && + psec->pcdev->pi[psec->id].isr_pd_detected) { + ret = pse_pi_update_pw_budget(psec->pcdev, psec->id, + pw_limit, extack); + if (ret < 0) + return ret; + previous_pw_allocated = ret; + } + + ret = regulator_set_current_limit(psec->ps, 0, uA); + if (ret < 0 && previous_pw_allocated) { + pse_pi_update_pw_budget(psec->pcdev, psec->id, + previous_pw_allocated, extack); + } + + return ret; } EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit); +/** + * pse_ethtool_set_prio - Set PSE PI priority according to the budget + * evaluation strategy + * @psec: PSE control pointer + * @extack: extack for reporting useful error messages + * @prio: priority value + * + * Return: 0 on success and failure value on error + */ +int pse_ethtool_set_prio(struct pse_control *psec, + struct netlink_ext_ack *extack, + unsigned int prio) +{ + struct pse_controller_dev *pcdev = psec->pcdev; + const struct pse_controller_ops *ops; + int ret = 0; + + if (!pcdev->pi[psec->id].pw_d) { + NL_SET_ERR_MSG(extack, "no power domain attached"); + return -EOPNOTSUPP; + } + + /* We don't want the priority to change in the middle of an + * enable/disable call or a priority mode change + */ + mutex_lock(&pcdev->lock); + switch (pcdev->pi[psec->id].pw_d->budget_eval_strategy) { + case PSE_BUDGET_EVAL_STRAT_STATIC: + if (prio >= pcdev->nr_lines) { + NL_SET_ERR_MSG_FMT(extack, + "priority %d exceeds priority max %d", + prio, pcdev->nr_lines); + ret = -ERANGE; + goto out; + } + + pcdev->pi[psec->id].prio = prio; + pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[psec->id].pw_d); + break; + + case PSE_BUDGET_EVAL_STRAT_DYNAMIC: + ops = psec->pcdev->ops; + if (!ops->pi_set_prio) { + NL_SET_ERR_MSG(extack, + "pse driver does not support setting port priority"); + ret = -EOPNOTSUPP; + goto out; + } + + if (prio > pcdev->pis_prio_max) { + NL_SET_ERR_MSG_FMT(extack, + "priority %d exceeds priority max %d", + prio, pcdev->pis_prio_max); + ret = -ERANGE; + goto out; + } + + ret = ops->pi_set_prio(pcdev, psec->id, prio); + break; + + default: + ret = -EOPNOTSUPP; + } + +out: + mutex_unlock(&pcdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(pse_ethtool_set_prio); + bool pse_has_podl(struct pse_control *psec) { return psec->pcdev->types & ETHTOOL_PSE_PODL; diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c index 5e9dda2c0eac..63f8f43062bc 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -16,15 +16,34 @@ #include <linux/pse-pd/pse.h> #define TPS23881_MAX_CHANS 8 - +#define TPS23881_MAX_IRQ_RETRIES 10 + +#define TPS23881_REG_IT 0x0 +#define TPS23881_REG_IT_MASK 0x1 +#define TPS23881_REG_IT_DISF BIT(2) +#define TPS23881_REG_IT_DETC BIT(3) +#define TPS23881_REG_IT_CLASC BIT(4) +#define TPS23881_REG_IT_IFAULT BIT(5) +#define TPS23881_REG_IT_SUPF BIT(7) +#define TPS23881_REG_DET_EVENT 0x5 +#define TPS23881_REG_FAULT 0x7 +#define TPS23881_REG_SUPF_EVENT 0xb +#define TPS23881_REG_TSD BIT(7) +#define TPS23881_REG_DISC 0xc #define TPS23881_REG_PW_STATUS 0x10 #define TPS23881_REG_OP_MODE 0x12 +#define TPS23881_REG_DISC_EN 0x13 #define TPS23881_OP_MODE_SEMIAUTO 0xaaaa #define TPS23881_REG_DIS_EN 0x13 #define TPS23881_REG_DET_CLA_EN 0x14 #define TPS23881_REG_GEN_MASK 0x17 +#define TPS23881_REG_CLCHE BIT(2) +#define TPS23881_REG_DECHE BIT(3) #define TPS23881_REG_NBITACC BIT(5) +#define TPS23881_REG_INTEN BIT(7) #define TPS23881_REG_PW_EN 0x19 +#define TPS23881_REG_RESET 0x1a +#define 
TPS23881_REG_CLRAIN BIT(7) #define TPS23881_REG_2PAIR_POL1 0x1e #define TPS23881_REG_PORT_MAP 0x26 #define TPS23881_REG_PORT_POWER 0x29 @@ -51,6 +70,7 @@ struct tps23881_port_desc { u8 chan[2]; bool is_4p; int pw_pol; + bool exist; }; struct tps23881_priv { @@ -168,6 +188,7 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id) struct i2c_client *client = priv->client; u8 chan; u16 val; + int ret; if (id >= TPS23881_MAX_CHANS) return -ERANGE; @@ -181,7 +202,22 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id) BIT(chan % 4)); } - return i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val); + ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val); + if (ret) + return ret; + + /* Enable DC disconnect */ + chan = priv->port[id].chan[0]; + ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN); + if (ret < 0) + return ret; + + val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4)); + ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val); + if (ret) + return ret; + + return 0; } static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id) @@ -214,6 +250,17 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id) */ mdelay(5); + /* Disable DC disconnect */ + chan = priv->port[id].chan[0]; + ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN); + if (ret < 0) + return ret; + + val = tps23881_set_val(ret, chan, 0, 0, BIT(chan % 4)); + ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val); + if (ret) + return ret; + /* Enable detection and classification */ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN); if (ret < 0) @@ -782,8 +829,10 @@ tps23881_write_port_matrix(struct tps23881_priv *priv, hw_chan = port_matrix[i].hw_chan[0] % 4; /* Set software port matrix for existing ports */ - if (port_matrix[i].exist) + if (port_matrix[i].exist) { priv->port[pi_id].chan[0] = lgcl_chan; + priv->port[pi_id].exist = true; + } /* Initialize power policy internal value */ priv->port[pi_id].pw_pol = -1; @@ -907,6 +956,47 @@ static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev) return ret; } +static int tps23881_power_class_table[] = { + -ERANGE, + 4000, + 7000, + 15500, + 30000, + 15500, + 15500, + -ERANGE, + 45000, + 60000, + 75000, + 90000, + 15500, + 45000, + -ERANGE, + -ERANGE, +}; + +static int tps23881_pi_get_pw_req(struct pse_controller_dev *pcdev, int id) +{ + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + u8 reg, chan; + int ret; + u16 val; + + /* For a 4-pair port the classification needs 5 ms to complete */ + if (priv->port[id].is_4p) + mdelay(5); + + chan = priv->port[id].chan[0]; + reg = TPS23881_REG_DISC + (chan % 4); + ret = i2c_smbus_read_word_data(client, reg); + if (ret < 0) + return ret; + + val = tps23881_calc_val(ret, chan, 4, 0xf); + return tps23881_power_class_table[val]; +} + static const struct pse_controller_ops tps23881_ops = { .setup_pi_matrix = tps23881_setup_pi_matrix, .pi_enable = tps23881_pi_enable, @@ -919,6 +1009,7 @@ static const struct pse_controller_ops tps23881_ops = { .pi_get_pw_limit = tps23881_pi_get_pw_limit, .pi_set_pw_limit = tps23881_pi_set_pw_limit, .pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges, + .pi_get_pw_req = tps23881_pi_get_pw_req, }; static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin"; @@ -1017,6 +1108,307 @@ static int tps23881_flash_sram_fw(struct i2c_client *client) return 0; } +/* Convert interrupt events 
to 0xff to be aligned with the chan + * number. + */ +static u8 tps23881_irq_export_chans_helper(u16 reg_val, u8 field_offset) +{ + u8 val; + + val = (reg_val >> (4 + field_offset) & 0xf0) | + (reg_val >> field_offset & 0x0f); + + return val; +} + +/* Convert chan number to port number */ +static void tps23881_set_notifs_helper(struct tps23881_priv *priv, + u8 chans, + unsigned long *notifs, + unsigned long *notifs_mask, + enum ethtool_pse_event event) +{ + u8 chan; + int i; + + if (!chans) + return; + + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (!priv->port[i].exist) + continue; + /* No need to look at the 2nd channel in case of PoE4 as + * both registers are set. + */ + chan = priv->port[i].chan[0]; + + if (BIT(chan) & chans) { + *notifs_mask |= BIT(i); + notifs[i] |= event; + } + } +} + +static void tps23881_irq_event_over_temp(struct tps23881_priv *priv, + u16 reg_val, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + int i; + + if (reg_val & TPS23881_REG_TSD) { + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (!priv->port[i].exist) + continue; + + *notifs_mask |= BIT(i); + notifs[i] |= ETHTOOL_PSE_EVENT_OVER_TEMP; + } + } +} + +static int tps23881_irq_event_over_current(struct tps23881_priv *priv, + u16 reg_val, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + int i, ret; + u8 chans; + + chans = tps23881_irq_export_chans_helper(reg_val, 0); + if (!chans) + return 0; + + tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask, + ETHTOOL_PSE_EVENT_OVER_CURRENT | + ETHTOOL_C33_PSE_EVENT_DISCONNECTION); + + /* An Over Current event resets the power limit registers, so we need + * to configure them again. + */ + for_each_set_bit(i, notifs_mask, priv->pcdev.nr_lines) { + if (priv->port[i].pw_pol < 0) + continue; + + ret = tps23881_pi_enable_manual_pol(priv, i); + if (ret < 0) + return ret; + + /* Set power policy */ + ret = tps23881_pi_set_pw_pol_limit(priv, i, + priv->port[i].pw_pol, + priv->port[i].is_4p); + if (ret < 0) + return ret; + } + + return 0; +} + +static void tps23881_irq_event_disconnection(struct tps23881_priv *priv, + u16 reg_val, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + u8 chans; + + chans = tps23881_irq_export_chans_helper(reg_val, 4); + if (chans) + tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask, + ETHTOOL_C33_PSE_EVENT_DISCONNECTION); +} + +static int tps23881_irq_event_detection(struct tps23881_priv *priv, + u16 reg_val, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + enum ethtool_pse_event event; + int reg, ret, i, val; + unsigned long chans; + + chans = tps23881_irq_export_chans_helper(reg_val, 0); + for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) { + reg = TPS23881_REG_DISC + (i % 4); + ret = i2c_smbus_read_word_data(priv->client, reg); + if (ret < 0) + return ret; + + val = tps23881_calc_val(ret, i, 0, 0xf); + /* If detection is valid */ + if (val == 0x4) + event = ETHTOOL_C33_PSE_EVENT_DETECTION; + else + event = ETHTOOL_C33_PSE_EVENT_DISCONNECTION; + + tps23881_set_notifs_helper(priv, BIT(i), notifs, + notifs_mask, event); + } + + return 0; +} + +static int tps23881_irq_event_classification(struct tps23881_priv *priv, + u16 reg_val, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + int reg, ret, val, i; + unsigned long chans; + + chans = tps23881_irq_export_chans_helper(reg_val, 4); + for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) { + reg = TPS23881_REG_DISC + (i % 4); + ret = i2c_smbus_read_word_data(priv->client, reg); + if (ret < 0) + return ret; + + val = tps23881_calc_val(ret, i, 
4, 0xf); + /* Do not report classification event for unknown class */ + if (!val || val == 0x8 || val == 0xf) + continue; + + tps23881_set_notifs_helper(priv, BIT(i), notifs, + notifs_mask, + ETHTOOL_C33_PSE_EVENT_CLASSIFICATION); + } + + return 0; +} + +static int tps23881_irq_event_handler(struct tps23881_priv *priv, u16 reg, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + struct i2c_client *client = priv->client; + int ret, val; + + /* The Supply event bit is repeated twice so we only need to read + * the one from the first byte. + */ + if (reg & TPS23881_REG_IT_SUPF) { + ret = i2c_smbus_read_word_data(client, TPS23881_REG_SUPF_EVENT); + if (ret < 0) + return ret; + tps23881_irq_event_over_temp(priv, ret, notifs, notifs_mask); + } + + if (reg & (TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_IFAULT << 8 | + TPS23881_REG_IT_DISF | TPS23881_REG_IT_DISF << 8)) { + ret = i2c_smbus_read_word_data(client, TPS23881_REG_FAULT); + if (ret < 0) + return ret; + + /* Preserve the FAULT register value: the over-current handler + * returns a status code, not the register contents. + */ + val = ret; + ret = tps23881_irq_event_over_current(priv, val, notifs, + notifs_mask); + if (ret) + return ret; + + tps23881_irq_event_disconnection(priv, val, notifs, notifs_mask); + } + + if (reg & (TPS23881_REG_IT_DETC | TPS23881_REG_IT_DETC << 8 | + TPS23881_REG_IT_CLASC | TPS23881_REG_IT_CLASC << 8)) { + ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_EVENT); + if (ret < 0) + return ret; + + val = ret; + ret = tps23881_irq_event_detection(priv, val, notifs, + notifs_mask); + if (ret) + return ret; + + ret = tps23881_irq_event_classification(priv, val, notifs, + notifs_mask); + if (ret) + return ret; + } + return 0; +} + +static int tps23881_irq_handler(int irq, struct pse_controller_dev *pcdev, + unsigned long *notifs, + unsigned long *notifs_mask) +{ + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + int ret, it_mask, retry; + + /* Get interrupt mask */ + ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT_MASK); + if (ret < 0) + return ret; + it_mask = ret; + + /* Read the interrupt register until it releases the interrupt pin. 
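+ * New events that arrive while earlier ones are being handled keep
+ * the pin asserted, so keep polling until no masked event bit
+ * remains, bounded by TPS23881_MAX_IRQ_RETRIES to avoid looping
+ * forever.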
*/ + retry = 0; + while (true) { + if (retry > TPS23881_MAX_IRQ_RETRIES) { + dev_err(&client->dev, "interrupt never released"); + return -ETIMEDOUT; + } + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT); + if (ret < 0) + return ret; + + /* No more relevant interrupts */ + if (!(ret & it_mask)) + return 0; + + ret = tps23881_irq_event_handler(priv, (u16)ret, notifs, + notifs_mask); + if (ret) + return ret; + + retry++; + } + return 0; +} + +static int tps23881_setup_irq(struct tps23881_priv *priv, int irq) +{ + struct i2c_client *client = priv->client; + struct pse_irq_desc irq_desc = { + .name = "tps23881-irq", + .map_event = tps23881_irq_handler, + }; + int ret; + u16 val; + + if (!irq) { + dev_err(&client->dev, "interrupt is missing"); + return -EINVAL; + } + + val = TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_SUPF | + TPS23881_REG_IT_DETC | TPS23881_REG_IT_CLASC | + TPS23881_REG_IT_DISF; + val |= val << 8; + ret = i2c_smbus_write_word_data(client, TPS23881_REG_IT_MASK, val); + if (ret) + return ret; + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_GEN_MASK); + if (ret < 0) + return ret; + + val = TPS23881_REG_INTEN | TPS23881_REG_CLCHE | TPS23881_REG_DECHE; + val |= val << 8; + val |= (u16)ret; + ret = i2c_smbus_write_word_data(client, TPS23881_REG_GEN_MASK, val); + if (ret < 0) + return ret; + + /* Reset interrupt registers */ + ret = i2c_smbus_write_word_data(client, TPS23881_REG_RESET, + TPS23881_REG_CLRAIN); + if (ret < 0) + return ret; + + return devm_pse_irq_helper(&priv->pcdev, irq, 0, &irq_desc); +} + static int tps23881_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -1091,12 +1483,17 @@ static int tps23881_i2c_probe(struct i2c_client *client) priv->pcdev.dev = dev; priv->pcdev.types = ETHTOOL_PSE_C33; priv->pcdev.nr_lines = TPS23881_MAX_CHANS; + priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_STATIC; ret = devm_pse_controller_register(dev, &priv->pcdev); if (ret) { return dev_err_probe(dev, ret, "failed to register PSE controller\n"); } + ret = tps23881_setup_irq(priv, client->irq); + if (ret) + return ret; + return ret; } diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 370b32fc2588..0a678e31cfaa 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -113,9 +113,8 @@ config USB_RTL8152 config USB_LAN78XX tristate "Microchip LAN78XX Based USB Ethernet Adapters" select MII - select PHYLIB + select PHYLINK select MICROCHIP_PHY - select FIXED_PHY select CRC32 help This option adds support for Microchip LAN78XX based USB 2 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f53e255116ea..f00284c9ad34 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -6,6 +6,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/phylink.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/signal.h> @@ -384,7 +385,7 @@ struct skb_data { /* skb->cb is one of these */ #define EVENT_RX_HALT 1 #define EVENT_RX_MEMORY 2 #define EVENT_STS_SPLIT 3 -#define EVENT_LINK_RESET 4 +#define EVENT_PHY_INT_ACK 4 #define EVENT_RX_PAUSED 5 #define EVENT_DEV_WAKING 6 #define EVENT_DEV_ASLEEP 7 @@ -413,7 +414,6 @@ struct lan78xx_net { struct net_device *net; struct usb_device *udev; struct usb_interface *intf; - void *driver_priv; unsigned int tx_pend_data_len; size_t n_tx_urbs; @@ -448,28 +448,24 @@ struct lan78xx_net { unsigned long flags; wait_queue_head_t *wait; - unsigned char suspend_count; unsigned int 
maxpacket; struct timer_list stat_monitor; unsigned long data[5]; - int link_on; - u8 mdix_ctrl; - u32 chipid; u32 chiprev; struct mii_bus *mdiobus; phy_interface_t interface; - int fc_autoneg; - u8 fc_request_control; - int delta; struct statstage stats; struct irq_domain_data domain_data; + + struct phylink *phylink; + struct phylink_config phylink_config; }; /* use ethtool to change the level for any given device */ @@ -1554,28 +1550,6 @@ static void lan78xx_set_multicast(struct net_device *netdev) schedule_work(&pdata->set_multicast); } -static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev, - bool tx_pause, bool rx_pause); - -static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, - u16 lcladv, u16 rmtadv) -{ - u8 cap; - - if (dev->fc_autoneg) - cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - else - cap = dev->fc_request_control; - - netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", - (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), - (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); - - return lan78xx_configure_flowcontrol(dev, - cap & FLOW_CTRL_TX, - cap & FLOW_CTRL_RX); -} - static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev); static int lan78xx_mac_reset(struct lan78xx_net *dev) @@ -1638,75 +1612,6 @@ static int lan78xx_phy_int_ack(struct lan78xx_net *dev) return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); } -static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed); - -static int lan78xx_link_reset(struct lan78xx_net *dev) -{ - struct phy_device *phydev = dev->net->phydev; - struct ethtool_link_ksettings ecmd; - int ladv, radv, ret, link; - - /* clear LAN78xx interrupt status */ - ret = lan78xx_phy_int_ack(dev); - if (unlikely(ret < 0)) - return ret; - - mutex_lock(&phydev->lock); - phy_read_status(phydev); - link = phydev->link; - mutex_unlock(&phydev->lock); - - if (!link && dev->link_on) { - dev->link_on = false; - - /* reset MAC */ - ret = lan78xx_mac_reset(dev); - if (ret < 0) - return ret; - - timer_delete(&dev->stat_monitor); - } else if (link && !dev->link_on) { - dev->link_on = true; - - phy_ethtool_ksettings_get(phydev, &ecmd); - - ret = lan78xx_configure_usb(dev, ecmd.base.speed); - if (ret < 0) - return ret; - - ladv = phy_read(phydev, MII_ADVERTISE); - if (ladv < 0) - return ladv; - - radv = phy_read(phydev, MII_LPA); - if (radv < 0) - return radv; - - netif_dbg(dev, link, dev->net, - "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", - ecmd.base.speed, ecmd.base.duplex, ladv, radv); - - ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv, - radv); - if (ret < 0) - return ret; - - if (!timer_pending(&dev->stat_monitor)) { - dev->delta = 1; - mod_timer(&dev->stat_monitor, - jiffies + STAT_UPDATE_TIMER); - } - - lan78xx_rx_urb_submit_all(dev); - - local_bh_disable(); - napi_schedule(&dev->napi); - local_bh_enable(); - } - - return 0; -} - /* some work can't be done in tasklets, so we use keventd * * NOTE: annoying asymmetry: if it's active, schedule_work() fails, @@ -1733,7 +1638,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) if (intdata & INT_ENP_PHY_INT) { netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); - lan78xx_defer_kevent(dev, EVENT_LINK_RESET); + lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK); if (dev->domain_data.phyirq > 0) generic_handle_irq_safe(dev->domain_data.phyirq); @@ -1880,66 +1785,15 @@ exit_pm_put: static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata) { struct lan78xx_net *dev = netdev_priv(net); - struct 
phy_device *phydev = net->phydev; - int ret; - u32 buf; - - ret = usb_autopm_get_interface(dev->intf); - if (ret < 0) - return ret; - ret = phy_ethtool_get_eee(phydev, edata); - if (ret < 0) - goto exit; - - ret = lan78xx_read_reg(dev, MAC_CR, &buf); - if (buf & MAC_CR_EEE_EN_) { - /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ - ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf); - edata->tx_lpi_timer = buf; - } else { - edata->tx_lpi_timer = 0; - } - - ret = 0; -exit: - usb_autopm_put_interface(dev->intf); - - return ret; + return phylink_ethtool_get_eee(dev->phylink, edata); } static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata) { struct lan78xx_net *dev = netdev_priv(net); - int ret; - u32 buf; - - ret = usb_autopm_get_interface(dev->intf); - if (ret < 0) - return ret; - - ret = phy_ethtool_set_eee(net->phydev, edata); - if (ret < 0) - goto out; - - buf = (u32)edata->tx_lpi_timer; - ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf); -out: - usb_autopm_put_interface(dev->intf); - - return ret; -} -static u32 lan78xx_get_link(struct net_device *net) -{ - u32 link; - - mutex_lock(&net->phydev->lock); - phy_read_status(net->phydev); - link = net->phydev->link; - mutex_unlock(&net->phydev->lock); - - return link; + return phylink_ethtool_set_eee(dev->phylink, edata); } static void lan78xx_get_drvinfo(struct net_device *net, @@ -1969,109 +1823,32 @@ static int lan78xx_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - int ret; - ret = usb_autopm_get_interface(dev->intf); - if (ret < 0) - return ret; - - phy_ethtool_ksettings_get(phydev, cmd); - - usb_autopm_put_interface(dev->intf); - - return ret; + return phylink_ethtool_ksettings_get(dev->phylink, cmd); } static int lan78xx_set_link_ksettings(struct net_device *net, const struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - int ret = 0; - int temp; - ret = usb_autopm_get_interface(dev->intf); - if (ret < 0) - return ret; - - /* change speed & duplex */ - ret = phy_ethtool_ksettings_set(phydev, cmd); - - if (!cmd->base.autoneg) { - /* force link down */ - temp = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK); - mdelay(1); - phy_write(phydev, MII_BMCR, temp); - } - - usb_autopm_put_interface(dev->intf); - - return ret; + return phylink_ethtool_ksettings_set(dev->phylink, cmd); } static void lan78xx_get_pause(struct net_device *net, struct ethtool_pauseparam *pause) { struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - struct ethtool_link_ksettings ecmd; - - phy_ethtool_ksettings_get(phydev, &ecmd); - pause->autoneg = dev->fc_autoneg; - - if (dev->fc_request_control & FLOW_CTRL_TX) - pause->tx_pause = 1; - - if (dev->fc_request_control & FLOW_CTRL_RX) - pause->rx_pause = 1; + phylink_ethtool_get_pauseparam(dev->phylink, pause); } static int lan78xx_set_pause(struct net_device *net, struct ethtool_pauseparam *pause) { struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - struct ethtool_link_ksettings ecmd; - int ret; - - phy_ethtool_ksettings_get(phydev, &ecmd); - - if (pause->autoneg && !ecmd.base.autoneg) { - ret = -EINVAL; - goto exit; - } - - dev->fc_request_control = 0; - if (pause->rx_pause) - dev->fc_request_control |= FLOW_CTRL_RX; - - if (pause->tx_pause) - dev->fc_request_control |= FLOW_CTRL_TX; 
- - if (ecmd.base.autoneg) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; - u32 mii_adv; - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - ecmd.link_modes.advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - ecmd.link_modes.advertising); - mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - mii_adv_to_linkmode_adv_t(fc, mii_adv); - linkmode_or(ecmd.link_modes.advertising, fc, - ecmd.link_modes.advertising); - - phy_ethtool_ksettings_set(phydev, &ecmd); - } - - dev->fc_autoneg = pause->autoneg; - - ret = 0; -exit: - return ret; + return phylink_ethtool_set_pauseparam(dev->phylink, pause); } static int lan78xx_get_regs_len(struct net_device *netdev) @@ -2108,7 +1885,7 @@ clean_data: } static const struct ethtool_ops lan78xx_ethtool_ops = { - .get_link = lan78xx_get_link, + .get_link = ethtool_op_get_link, .nway_reset = phy_ethtool_nway_reset, .get_drvinfo = lan78xx_get_drvinfo, .get_msglevel = lan78xx_get_msglevel, @@ -2332,26 +2109,6 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) mdiobus_free(dev->mdiobus); } -static void lan78xx_link_status_change(struct net_device *net) -{ - struct lan78xx_net *dev = netdev_priv(net); - struct phy_device *phydev = net->phydev; - u32 data; - int ret; - - ret = lan78xx_read_reg(dev, MAC_CR, &data); - if (ret < 0) - return; - - if (phydev->enable_tx_lpi) - data |= MAC_CR_EEE_EN_; - else - data &= ~MAC_CR_EEE_EN_; - lan78xx_write_reg(dev, MAC_CR, data); - - phy_print_status(phydev); -} - static int irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { @@ -2448,10 +2205,8 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) dev->domain_data.irqchip = &lan78xx_irqchip; dev->domain_data.irq_handler = handle_simple_irq; - irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node), - MAX_INT_EP, 0, - &chip_domain_ops, - &dev->domain_data); + irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0, + &chip_domain_ops, &dev->domain_data); if (irqdomain) { /* create mapping for PHY interrupt */ irqmap = irq_create_mapping(irqdomain, INT_EP_PHY); @@ -2483,6 +2238,77 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) dev->domain_data.irqdomain = NULL; } +static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *net = to_net_dev(config->dev); + struct lan78xx_net *dev = netdev_priv(net); + u32 mac_cr = 0; + int ret; + + /* Check if the mode is supported */ + if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) { + netdev_err(net, "Unsupported negotiation mode: %u\n", mode); + return; + } + + switch (state->interface) { + case PHY_INTERFACE_MODE_GMII: + mac_cr |= MAC_CR_GMII_EN_; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + break; + default: + netdev_warn(net, "Unsupported interface mode: %d\n", + state->interface); + return; + } + + ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr); + if (ret < 0) + netdev_err(net, "Failed to config MAC with error %pe\n", + ERR_PTR(ret)); +} + +static void lan78xx_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct net_device *net = to_net_dev(config->dev); + struct lan78xx_net *dev = netdev_priv(net); + int ret; + + netif_stop_queue(net); + + /* MAC reset will not de-assert TXEN/RXEN, we need to stop them + * manually before 
reset. TX and RX should be disabled before running + * link_up sequence. + */ + ret = lan78xx_stop_tx_path(dev); + if (ret < 0) + goto link_down_fail; + + ret = lan78xx_stop_rx_path(dev); + if (ret < 0) + goto link_down_fail; + + /* MAC reset seems to not affect MAC configuration, no idea if it is + * really needed, but it was done in previous driver version. So, leave + * it here. + */ + ret = lan78xx_mac_reset(dev); + if (ret < 0) + goto link_down_fail; + + return; + +link_down_fail: + netdev_err(dev->net, "Failed to set MAC down with error %pe\n", + ERR_PTR(ret)); +} + /** * lan78xx_configure_usb - Configure USB link power settings * @dev: pointer to the LAN78xx device structure @@ -2618,28 +2444,155 @@ static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev, return lan78xx_write_reg(dev, FLOW, flow); } +static void lan78xx_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *net = to_net_dev(config->dev); + struct lan78xx_net *dev = netdev_priv(net); + u32 mac_cr = 0; + int ret; + + switch (speed) { + case SPEED_1000: + mac_cr |= MAC_CR_SPEED_1000_; + break; + case SPEED_100: + mac_cr |= MAC_CR_SPEED_100_; + break; + case SPEED_10: + mac_cr |= MAC_CR_SPEED_10_; + break; + default: + netdev_err(dev->net, "Unsupported speed %d\n", speed); + return; + } + + if (duplex == DUPLEX_FULL) + mac_cr |= MAC_CR_FULL_DUPLEX_; + + /* make sure TXEN and RXEN are disabled before reconfiguring MAC */ + ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ | + MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr); + if (ret < 0) + goto link_up_fail; + + ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause); + if (ret < 0) + goto link_up_fail; + + ret = lan78xx_configure_usb(dev, speed); + if (ret < 0) + goto link_up_fail; + + lan78xx_rx_urb_submit_all(dev); + + ret = lan78xx_flush_rx_fifo(dev); + if (ret < 0) + goto link_up_fail; + + ret = lan78xx_flush_tx_fifo(dev); + if (ret < 0) + goto link_up_fail; + + ret = lan78xx_start_tx_path(dev); + if (ret < 0) + goto link_up_fail; + + ret = lan78xx_start_rx_path(dev); + if (ret < 0) + goto link_up_fail; + + netif_start_queue(net); + + return; + +link_up_fail: + netdev_err(dev->net, "Failed to set MAC up with error %pe\n", + ERR_PTR(ret)); +} + /** - * lan78xx_register_fixed_phy() - Register a fallback fixed PHY + * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support * @dev: LAN78xx device + * @enable: true to enable EEE, false to disable * - * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases - * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a - * switch without a visible PHY. + * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy + * Efficient Ethernet (EEE) operation. According to current understanding + * of the LAN7800 documentation, this bit can be modified while TX and RX + * are enabled. No explicit requirement was found to disable data paths + * before changing this bit. 
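+ * The phylink mac_enable_tx_lpi()/mac_disable_tx_lpi() callbacks below
+ * therefore call this helper directly, without stopping the TX and RX
+ * paths first.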
+ * + * Return: 0 on success or a negative error code + */ +static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable) +{ + u32 mac_cr = 0; + + if (enable) + mac_cr |= MAC_CR_EEE_EN_; + + return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr); +} + +static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct net_device *net = to_net_dev(config->dev); + struct lan78xx_net *dev = netdev_priv(net); + + lan78xx_mac_eee_enable(dev, false); +} + +static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct net_device *net = to_net_dev(config->dev); + struct lan78xx_net *dev = netdev_priv(net); + int ret; + + /* Software should only change this field when Energy Efficient + * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing + * EEEEN during probe, and phylink itself guarantees that + * mac_disable_tx_lpi() will have been previously called. + */ + ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer); + if (ret < 0) + return ret; + + return lan78xx_mac_eee_enable(dev, true); +} + +static const struct phylink_mac_ops lan78xx_phylink_mac_ops = { + .mac_config = lan78xx_mac_config, + .mac_link_down = lan78xx_mac_link_down, + .mac_link_up = lan78xx_mac_link_up, + .mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi, + .mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi, +}; + +/** + * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801 + * @dev: LAN78xx device + * + * Use fixed link configuration with 1 Gbps full duplex. This is used in special + * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface + * to a switch without a visible PHY. * - * Return: pointer to the registered fixed PHY, or ERR_PTR() on error. + * Return: 0 on success or a negative error code. */ -static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev) +static int lan78xx_set_fixed_link(struct lan78xx_net *dev) { - struct fixed_phy_status fphy_status = { - .link = 1, + static const struct phylink_link_state state = { .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; netdev_info(dev->net, - "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n"); + "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n"); - return fixed_phy_register(&fphy_status, NULL); + return phylink_set_fixed_link(dev->phylink, &state); } /** @@ -2675,7 +2628,7 @@ static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev) dev->interface = PHY_INTERFACE_MODE_RGMII; - /* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */ - return lan78xx_register_fixed_phy(dev); + /* No PHY found – fall back to a fixed link (e.g. KSZ switch board) */ + return NULL; case ID_REV_CHIP_ID_7800_: case ID_REV_CHIP_ID_7850_: @@ -2802,20 +2755,96 @@ static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev, return lan78xx_write_reg(dev, HW_CFG, reg); } +static int lan78xx_phylink_setup(struct lan78xx_net *dev) +{ + struct phylink_config *pc = &dev->phylink_config; + struct phylink *phylink; + + pc->dev = &dev->net->dev; + pc->type = PHYLINK_NETDEV; + pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | + MAC_100 | MAC_1000FD; + pc->mac_managed_pm = true; + pc->lpi_capabilities = MAC_100FD | MAC_1000FD; + /* + * Default TX LPI (Low Power Idle) request delay count is set to 50us. + * + * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204. + * + * Reasoning: + * According to the application note in the LAN7800 documentation, a + * zero delay may negatively impact the TX data path’s ability to + * support Gigabit operation. 
A value of 50us is recommended as a + * reasonable default when the part operates at Gigabit speeds, + * balancing stability and power efficiency in EEE mode. This delay can + * be increased based on performance testing, as EEE is designed for + * scenarios with mostly idle links and occasional bursts of full + * bandwidth transmission. The goal is to ensure reliable Gigabit + * performance without overly aggressive power optimization during + * inactive periods. + */ + pc->lpi_timer_default = 50; + pc->eee_enabled_default = true; + + if (dev->chipid == ID_REV_CHIP_ID_7801_) + phy_interface_set_rgmii(pc->supported_interfaces); + else + __set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces); + + memcpy(dev->phylink_config.lpi_interfaces, + dev->phylink_config.supported_interfaces, + sizeof(dev->phylink_config.lpi_interfaces)); + + phylink = phylink_create(pc, dev->net->dev.fwnode, + dev->interface, &lan78xx_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + dev->phylink = phylink; + + return 0; +} + +static void lan78xx_phy_uninit(struct lan78xx_net *dev) +{ + if (dev->phylink) { + phylink_disconnect_phy(dev->phylink); + phylink_destroy(dev->phylink); + dev->phylink = NULL; + } +} + static int lan78xx_phy_init(struct lan78xx_net *dev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; - int ret; - u32 mii_adv; struct phy_device *phydev; + int ret; phydev = lan78xx_get_phy(dev); + /* phydev can be NULL if no PHY is found and the chip is LAN7801, + * which will use a fixed link later. + * If an error occurs, return the error code immediately. + */ if (IS_ERR(phydev)) return PTR_ERR(phydev); + ret = lan78xx_phylink_setup(dev); + if (ret < 0) + return ret; + + /* If no PHY is found, set up a fixed link. It is very specific to + * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where + * LAN7801 acts as a USB-to-Ethernet interface to a switch without + * a visible PHY. 
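+ * Note that this must run after lan78xx_phylink_setup(), since
+ * phylink_set_fixed_link() operates on the phylink instance created
+ * there.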
+ */ + if (!phydev) { + ret = lan78xx_set_fixed_link(dev); + if (ret < 0) + goto phylink_uninit; + } + ret = lan78xx_mac_prepare_for_phy(dev); if (ret < 0) - goto free_phy; + goto phylink_uninit; /* if phyirq is not set, use polling mode in phylib */ if (dev->domain_data.phyirq > 0) @@ -2824,54 +2853,21 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) phydev->irq = PHY_POLL; netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); - /* set to AUTOMDIX */ - phydev->mdix = ETH_TP_MDI_AUTO; - - ret = phy_connect_direct(dev->net, phydev, - lan78xx_link_status_change, - dev->interface); + ret = phylink_connect_phy(dev->phylink, phydev); if (ret) { - netdev_err(dev->net, "can't attach PHY to %s\n", - dev->mdiobus->id); - if (dev->chipid == ID_REV_CHIP_ID_7801_) { - if (phy_is_pseudo_fixed_link(phydev)) { - fixed_phy_unregister(phydev); - phy_device_free(phydev); - } - } - return -EIO; + netdev_err(dev->net, "can't attach PHY to %s, error %pe\n", + dev->mdiobus->id, ERR_PTR(ret)); + goto phylink_uninit; } - /* MAC doesn't support 1000T Half */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* support both flow controls */ - dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - mii_adv_to_linkmode_adv_t(fc, mii_adv); - linkmode_or(phydev->advertising, fc, phydev->advertising); - - phy_support_eee(phydev); - ret = lan78xx_configure_leds_from_dt(dev, phydev); - if (ret) - goto free_phy; - - genphy_config_aneg(phydev); - - dev->fc_autoneg = phydev->autoneg; + if (ret < 0) + goto phylink_uninit; return 0; -free_phy: - if (phy_is_pseudo_fixed_link(phydev)) { - fixed_phy_unregister(phydev); - phy_device_free(phydev); - } +phylink_uninit: + lan78xx_phy_uninit(dev); return ret; } @@ -3212,7 +3208,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) unsigned long timeout; int ret; u32 buf; - u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) @@ -3369,22 +3364,12 @@ static int lan78xx_reset(struct lan78xx_net *dev) if (ret < 0) return ret; + buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_); + /* LAN7801 only has RGMII mode */ - if (dev->chipid == ID_REV_CHIP_ID_7801_) { + if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - /* Enable Auto Duplex and Auto speed */ - buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; - } - if (dev->chipid == ID_REV_CHIP_ID_7800_ || - dev->chipid == ID_REV_CHIP_ID_7850_) { - ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); - if (!ret && sig != EEPROM_INDICATOR) { - /* Implies there is no external eeprom. Set mac speed */ - netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); - buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; - } - } ret = lan78xx_write_reg(dev, MAC_CR, buf); if (ret < 0) return ret; @@ -3434,9 +3419,11 @@ static int lan78xx_open(struct net_device *net) mutex_lock(&dev->dev_mutex); - phy_start(net->phydev); + lan78xx_init_stats(dev); - netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); + napi_enable(&dev->napi); + + set_bit(EVENT_DEV_OPEN, &dev->flags); /* for Link Check */ if (dev->urb_intr) { @@ -3448,31 +3435,8 @@ static int lan78xx_open(struct net_device *net) } } - ret = lan78xx_flush_rx_fifo(dev); - if (ret < 0) - goto done; - ret = lan78xx_flush_tx_fifo(dev); - if (ret < 0) - goto done; + phylink_start(dev->phylink); - ret = lan78xx_start_tx_path(dev); - if (ret < 0) - goto done; - ret = lan78xx_start_rx_path(dev); - if (ret < 0) - goto done; - - lan78xx_init_stats(dev); - - set_bit(EVENT_DEV_OPEN, &dev->flags); - - netif_start_queue(net); - - dev->link_on = false; - - napi_enable(&dev->napi); - - lan78xx_defer_kevent(dev, EVENT_LINK_RESET); done: mutex_unlock(&dev->dev_mutex); @@ -3530,7 +3494,6 @@ static int lan78xx_stop(struct net_device *net) timer_delete_sync(&dev->stat_monitor); clear_bit(EVENT_DEV_OPEN, &dev->flags); - netif_stop_queue(net); napi_disable(&dev->napi); lan78xx_terminate_urbs(dev); @@ -3540,12 +3503,7 @@ static int lan78xx_stop(struct net_device *net) net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); - /* ignore errors that occur stopping the Tx and Rx data paths */ - lan78xx_stop_tx_path(dev); - lan78xx_stop_rx_path(dev); - - if (net->phydev) - phy_stop(net->phydev); + phylink_stop(dev->phylink); usb_kill_urb(dev->urb_intr); @@ -3555,7 +3513,7 @@ static int lan78xx_stop(struct net_device *net) */ clear_bit(EVENT_TX_HALT, &dev->flags); clear_bit(EVENT_RX_HALT, &dev->flags); - clear_bit(EVENT_LINK_RESET, &dev->flags); + clear_bit(EVENT_PHY_INT_ACK, &dev->flags); clear_bit(EVENT_STAT_UPDATE, &dev->flags); cancel_delayed_work_sync(&dev->wq); @@ -4479,14 +4437,14 @@ static void lan78xx_delayedwork(struct work_struct *work) } } - if (test_bit(EVENT_LINK_RESET, &dev->flags)) { + if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) { int ret = 0; - clear_bit(EVENT_LINK_RESET, &dev->flags); - if (lan78xx_link_reset(dev) < 0) { - netdev_info(dev->net, "link reset failed (%d)\n", - ret); - } + clear_bit(EVENT_PHY_INT_ACK, &dev->flags); + ret = lan78xx_phy_int_ack(dev); + if (ret) + netdev_info(dev->net, "PHY INT ack failed (%pe)\n", + ERR_PTR(ret)); } if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) { @@ -4560,32 +4518,29 @@ static void lan78xx_disconnect(struct usb_interface *intf) struct lan78xx_net *dev; struct usb_device *udev; struct net_device *net; - struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; - netif_napi_del(&dev->napi); - udev = interface_to_usbdev(intf); net = dev->net; + rtnl_lock(); + phylink_stop(dev->phylink); + phylink_disconnect_phy(dev->phylink); + rtnl_unlock(); + + netif_napi_del(&dev->napi); + unregister_netdev(net); timer_shutdown_sync(&dev->stat_monitor); set_bit(EVENT_DEV_DISCONNECT, &dev->flags); cancel_delayed_work_sync(&dev->wq); - phydev = net->phydev; - - phy_disconnect(net->phydev); - - if (phy_is_pseudo_fixed_link(phydev)) { - fixed_phy_unregister(phydev); - phy_device_free(phydev); - } + phylink_destroy(dev->phylink); usb_scuttle_anchored_urbs(&dev->deferred); @@ -4669,7 +4624,6 @@ static int lan78xx_probe(struct usb_interface *intf, goto out1; } - /* netdev_printk() 
needs this */ SET_NETDEV_DEV(netdev, &intf->dev); dev = netdev_priv(netdev); @@ -4788,7 +4742,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out8; + goto phy_uninit; } usb_set_intfdata(intf, dev); @@ -4803,8 +4757,8 @@ static int lan78xx_probe(struct usb_interface *intf, return 0; -out8: - phy_disconnect(netdev->phydev); +phy_uninit: + lan78xx_phy_uninit(dev); free_urbs: usb_free_urb(dev->urb_intr); out5: @@ -5139,6 +5093,10 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) spin_unlock_irq(&dev->txq.lock); } + rtnl_lock(); + phylink_suspend(dev->phylink, false); + rtnl_unlock(); + /* stop RX */ ret = lan78xx_stop_rx_path(dev); if (ret < 0) @@ -5366,11 +5324,15 @@ static int lan78xx_reset_resume(struct usb_interface *intf) if (ret < 0) return ret; - phy_start(dev->net->phydev); - ret = lan78xx_resume(intf); + if (ret < 0) + return ret; - return ret; + rtnl_lock(); + phylink_resume(dev->phylink); + rtnl_unlock(); + + return 0; } static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index c04e715a4c2a..9564478a79cc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, __skb_queue_tail(&dev->done, skb); if (dev->done.qlen == 1) - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); spin_unlock(&dev->done.lock); spin_unlock_irqrestore(&list->lock, flags); return old_state; @@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) default: netif_dbg(dev, rx_err, dev->net, "rx submit, %d\n", retval); - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); break; case 0: __usbnet_queue_skb(&dev->rxq, skb, rx_start); @@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev) num++; } - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); netif_dbg(dev, rx_status, dev->net, "paused rx queue disabled, %d skbs requeued\n", num); @@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev) { if (netif_running(dev->net)) { (void) unlink_urbs (dev, &dev->rxq); - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); @@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net) /* deferred work (timer, softirq, task) must also stop */ dev->flags = 0; timer_delete_sync(&dev->delay); - tasklet_kill(&dev->bh); + disable_work_sync(&dev->bh_work); cancel_work_sync(&dev->kevent); /* We have cyclic dependencies. Those calls are needed * to break a cycle. 
We cannot fall into the gaps because * we have a flag */ - tasklet_kill(&dev->bh); + disable_work_sync(&dev->bh_work); timer_delete_sync(&dev->delay); cancel_work_sync(&dev->kevent); @@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net) clear_bit(EVENT_RX_KILL, &dev->flags); // delay posting reads until we're fully open - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); if (info->manage_power) { retval = info->manage_power(dev, 1); if (retval < 0) { @@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev) */ } else { /* submitting URBs for reading packets */ - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } /* hard_mtu or rx_urb_size may change during link change */ @@ -1198,11 +1198,11 @@ fail_halt: } else { clear_bit (EVENT_RX_HALT, &dev->flags); if (!usbnet_going_away(dev)) - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } - /* tasklet could resubmit itself forever if memory is tight */ + /* work could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1; @@ -1224,7 +1224,7 @@ fail_halt: fail_lowmem: if (resched) if (!usbnet_going_away(dev)) - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } @@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue) struct usbnet *dev = netdev_priv(net); unlink_urbs (dev, &dev->txq); - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); /* this needs to be handled individually because the generic layer * doesn't know what is sufficient and could not restore private * information if a remedy of an unconditional reset were used. @@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb) /*-------------------------------------------------------------------------*/ -// tasklet (work deferred from completions, in_irq) or timer +// work (work deferred from completions, in_irq) or timer static void usbnet_bh (struct timer_list *t) { @@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t) "rxqlen %d --> %d\n", temp, dev->rxq.qlen); if (dev->rxq.qlen < RX_QLEN(dev)) - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } if (dev->txq.qlen < TX_QLEN (dev)) netif_wake_queue (dev->net); } } -static void usbnet_bh_tasklet(struct tasklet_struct *t) +static void usbnet_bh_work(struct work_struct *work) { - struct usbnet *dev = from_tasklet(dev, t, bh); + struct usbnet *dev = from_work(dev, work, bh_work); usbnet_bh(&dev->delay); } @@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) skb_queue_head_init (&dev->txq); skb_queue_head_init (&dev->done); skb_queue_head_init(&dev->rxq_pause); - tasklet_setup(&dev->bh, usbnet_bh_tasklet); + INIT_WORK(&dev->bh_work, usbnet_bh_work); INIT_WORK (&dev->kevent, usbnet_deferred_kevent); init_usb_anchor(&dev->deferred); timer_setup(&dev->delay, usbnet_bh, 0); @@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf) if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index e53ba600605a..07e41dce4203 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -4193,8 +4193,11 @@ static void virtnet_init_default_rss(struct virtnet_info *vi) netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); } 
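+/* The RXFH (hash field) configuration is exposed through the dedicated
+ * .get_rxfh_fields/.set_rxfh_fields ethtool ops registered below, so these
+ * helpers take struct ethtool_rxfh_fields directly instead of going through
+ * the generic ETHTOOL_GRXFH/SRXFH rxnfc commands.
+ */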
-static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) +static int virtnet_get_hashflow(struct net_device *dev, + struct ethtool_rxfh_fields *info) { + struct virtnet_info *vi = netdev_priv(dev); + info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: @@ -4243,17 +4246,22 @@ static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_r info->data = 0; break; } + + return 0; } -static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) +static int virtnet_set_hashflow(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) { + struct virtnet_info *vi = netdev_priv(dev); u32 new_hashtypes = vi->rss_hash_types_saved; bool is_disable = info->data & RXH_DISCARD; bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); /* supports only 'sd', 'sdfn' and 'r' */ if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) - return false; + return -EINVAL; switch (info->flow_type) { case TCP_V4_FLOW: @@ -4292,21 +4300,22 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc * break; default: /* unsupported flow */ - return false; + return -EINVAL; } /* if unsupported hashtype was set */ if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) - return false; + return -EINVAL; if (new_hashtypes != vi->rss_hash_types_saved) { vi->rss_hash_types_saved = new_hashtypes; vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); if (vi->dev->features & NETIF_F_RXHASH) - return virtnet_commit_rss_command(vi); + if (!virtnet_commit_rss_command(vi)) + return -EINVAL; } - return true; + return 0; } static void virtnet_get_drvinfo(struct net_device *dev, @@ -5540,27 +5549,6 @@ static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = vi->curr_queue_pairs; break; - case ETHTOOL_GRXFH: - virtnet_get_hashflow(vi, info); - break; - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - -static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) -{ - struct virtnet_info *vi = netdev_priv(dev); - int rc = 0; - - switch (info->cmd) { - case ETHTOOL_SRXFH: - if (!virtnet_set_hashflow(vi, info)) - rc = -EINVAL; - - break; default: rc = -EOPNOTSUPP; } @@ -5591,8 +5579,9 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, .get_rxfh = virtnet_get_rxfh, .set_rxfh = virtnet_set_rxfh, + .get_rxfh_fields = virtnet_get_hashflow, + .set_rxfh_fields = virtnet_set_hashflow, .get_rxnfc = virtnet_get_rxnfc, - .set_rxnfc = virtnet_set_rxnfc, }; static void virtnet_get_queue_stats_rx(struct net_device *dev, int i, diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 471f91c4204a..cc4d7573839d 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -833,11 +833,19 @@ out: } static int -vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, - struct ethtool_rxnfc *info) +vmxnet3_get_rss_hash_opts(struct net_device *netdev, + struct ethtool_rxfh_fields *info) { + struct vmxnet3_adapter *adapter = netdev_priv(netdev); enum Vmxnet3_RSSField rss_fields; + if (!VMXNET3_VERSION_GE_4(adapter)) + return -EOPNOTSUPP; +#ifdef VMXNET3_RSS + if (!adapter->rss) + return -EOPNOTSUPP; +#endif + if (netif_running(adapter->netdev)) { unsigned long flags; @@ -900,10 +908,20 @@ vmxnet3_get_rss_hash_opts(struct 
vmxnet3_adapter *adapter, static int vmxnet3_set_rss_hash_opt(struct net_device *netdev, - struct vmxnet3_adapter *adapter, - struct ethtool_rxnfc *nfc) + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { - enum Vmxnet3_RSSField rss_fields = adapter->rss_fields; + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + enum Vmxnet3_RSSField rss_fields; + + if (!VMXNET3_VERSION_GE_4(adapter)) + return -EOPNOTSUPP; +#ifdef VMXNET3_RSS + if (!adapter->rss) + return -EOPNOTSUPP; +#endif + + rss_fields = adapter->rss_fields; /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports @@ -1074,54 +1092,11 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; break; - case ETHTOOL_GRXFH: - if (!VMXNET3_VERSION_GE_4(adapter)) { - err = -EOPNOTSUPP; - break; - } -#ifdef VMXNET3_RSS - if (!adapter->rss) { - err = -EOPNOTSUPP; - break; - } -#endif - err = vmxnet3_get_rss_hash_opts(adapter, info); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int -vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) -{ - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - int err = 0; - - if (!VMXNET3_VERSION_GE_4(adapter)) { - err = -EOPNOTSUPP; - goto done; - } -#ifdef VMXNET3_RSS - if (!adapter->rss) { - err = -EOPNOTSUPP; - goto done; - } -#endif - - switch (info->cmd) { - case ETHTOOL_SRXFH: - err = vmxnet3_set_rss_hash_opt(netdev, adapter, info); - break; default: err = -EOPNOTSUPP; break; } -done: return err; } @@ -1361,12 +1336,13 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_ringparam = vmxnet3_get_ringparam, .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, - .set_rxnfc = vmxnet3_set_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh = vmxnet3_get_rss, .set_rxfh = vmxnet3_set_rss, #endif + .get_rxfh_fields = vmxnet3_get_rss_hash_opts, + .set_rxfh_fields = vmxnet3_set_rss_hash_opt, .get_link_ksettings = vmxnet3_get_link_ksettings, .get_channels = vmxnet3_get_channels, }; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 97792de896b7..bcde95cb2a2e 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1485,21 +1485,18 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev, static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) { - struct vxlan_net *vn; + ASSERT_RTNL(); if (!vs) return false; if (!refcount_dec_and_test(&vs->refcnt)) return false; - vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); - spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); udp_tunnel_notify_del_rx_port(vs->sock, (vs->flags & VXLAN_F_GPE) ? 
UDP_TUNNEL_TYPE_VXLAN_GPE : UDP_TUNNEL_TYPE_VXLAN); - spin_unlock(&vn->sock_lock); return true; } @@ -2451,6 +2448,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rcu_read_lock(); if (addr_family == AF_INET) { struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); + u16 ipcb_flags = 0; struct rtable *rt; __be16 df = 0; __be32 saddr; @@ -2467,6 +2465,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } + if (flags & VXLAN_F_MC_ROUTE) + ipcb_flags |= IPSKB_MCROUTE; + if (!info) { /* Bypass encapsulation if the destination is local */ err = encap_bypass_if_local(skb, dev, vxlan, AF_INET, @@ -2522,11 +2523,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, pkey->u.ipv4.dst, tos, ttl, df, - src_port, dst_port, xnet, !udp_sum); + src_port, dst_port, xnet, !udp_sum, + ipcb_flags); #if IS_ENABLED(CONFIG_IPV6) } else { struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct in6_addr saddr; + u16 ip6cb_flags = 0; if (!ifindex) ifindex = sock6->sock->sk->sk_bound_dev_if; @@ -2542,6 +2545,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } + if (flags & VXLAN_F_MC_ROUTE) + ip6cb_flags |= IP6SKB_MCROUTE; + if (!info) { u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags; @@ -2586,7 +2592,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, &saddr, &pkey->u.ipv6.dst, tos, ttl, - pkey->label, src_port, dst_port, !udp_sum); + pkey->label, src_port, dst_port, !udp_sum, + ip6cb_flags); #endif } vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); @@ -2847,26 +2854,23 @@ static void vxlan_cleanup(struct timer_list *t) static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) { - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); + ASSERT_RTNL(); - spin_lock(&vn->sock_lock); hlist_del_init_rcu(&vxlan->hlist4.hlist); #if IS_ENABLED(CONFIG_IPV6) hlist_del_init_rcu(&vxlan->hlist6.hlist); #endif - spin_unlock(&vn->sock_lock); } static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, struct vxlan_dev_node *node) { - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); __be32 vni = vxlan->default_dst.remote_vni; + ASSERT_RTNL(); + node->vxlan = vxlan; - spin_lock(&vn->sock_lock); hlist_add_head_rcu(&node->hlist, vni_head(vs, vni)); - spin_unlock(&vn->sock_lock); } /* Setup stats when device is created */ @@ -3291,9 +3295,10 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push) struct vxlan_net *vn = net_generic(net, vxlan_net_id); unsigned int i; - spin_lock(&vn->sock_lock); + ASSERT_RTNL(); + for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { + hlist_for_each_entry(vs, &vn->sock_list[i], hlist) { unsigned short type; if (vs->flags & VXLAN_F_GPE) @@ -3307,7 +3312,6 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push) udp_tunnel_drop_rx_port(dev, vs->sock, type); } } - spin_unlock(&vn->sock_lock); } /* Initialize the device structure. 
*/ @@ -3401,6 +3405,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1), [IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX), [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)), + [IFLA_VXLAN_MC_ROUTE] = NLA_POLICY_MAX(NLA_U8, 1), }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -3537,12 +3542,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, __be16 port, u32 flags, int ifindex) { - struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; struct socket *sock; unsigned int h; struct udp_tunnel_sock_cfg tunnel_cfg; + ASSERT_RTNL(); + vs = kzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) return ERR_PTR(-ENOMEM); @@ -3560,13 +3566,11 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, refcount_set(&vs->refcnt, 1); vs->flags = (flags & VXLAN_F_RCV_FLAGS); - spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); udp_tunnel_notify_add_rx_port(sock, (vs->flags & VXLAN_F_GPE) ? UDP_TUNNEL_TYPE_VXLAN_GPE : UDP_TUNNEL_TYPE_VXLAN); - spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); @@ -3590,26 +3594,27 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) { - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; struct vxlan_sock *vs = NULL; struct vxlan_dev_node *node; int l3mdev_index = 0; + ASSERT_RTNL(); + if (vxlan->cfg.remote_ifindex) l3mdev_index = l3mdev_master_upper_ifindex_by_index( vxlan->net, vxlan->cfg.remote_ifindex); if (!vxlan->cfg.no_share) { - spin_lock(&vn->sock_lock); + rcu_read_lock(); vs = vxlan_find_sock(vxlan->net, ipv6 ? 
AF_INET6 : AF_INET, vxlan->cfg.dst_port, vxlan->cfg.flags, l3mdev_index); if (vs && !refcount_inc_not_zero(&vs->refcnt)) { - spin_unlock(&vn->sock_lock); + rcu_read_unlock(); return -EBUSY; } - spin_unlock(&vn->sock_lock); + rcu_read_unlock(); } if (!vs) vs = vxlan_socket_create(vxlan->net, ipv6, @@ -4314,6 +4319,14 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], return err; } + if (data[IFLA_VXLAN_MC_ROUTE]) { + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_MC_ROUTE, + VXLAN_F_MC_ROUTE, changelink, + true, extack); + if (err) + return err; + } + if (tb[IFLA_MTU]) { if (changelink) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], @@ -4875,7 +4888,6 @@ static __net_init int vxlan_init_net(struct net *net) unsigned int h; INIT_LIST_HEAD(&vn->vxlan_list); - spin_lock_init(&vn->sock_lock); vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event; for (h = 0; h < PORT_HASH_SIZE; ++h) diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h index d328aed9feef..6c625fb29c6c 100644 --- a/drivers/net/vxlan/vxlan_private.h +++ b/drivers/net/vxlan/vxlan_private.h @@ -19,8 +19,8 @@ extern const struct rhashtable_params vxlan_vni_rht_params; /* per-network namespace private data for this module */ struct vxlan_net { struct list_head vxlan_list; + /* sock_list is protected by rtnl lock */ struct hlist_head sock_list[PORT_HASH_SIZE]; - spinlock_t sock_lock; struct notifier_block nexthop_notifier_block; }; diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 186d0660669a..4ff56d9f8f28 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -40,11 +40,11 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan, struct vxlan_vni_node *v, bool del) { - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_dev_node *node; struct vxlan_sock *vs; - spin_lock(&vn->sock_lock); + ASSERT_RTNL(); + if (del) { if (!hlist_unhashed(&v->hlist4.hlist)) hlist_del_init_rcu(&v->hlist4.hlist); @@ -52,7 +52,7 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan, if (!hlist_unhashed(&v->hlist6.hlist)) hlist_del_init_rcu(&v->hlist6.hlist); #endif - goto out; + return; } #if IS_ENABLED(CONFIG_IPV6) @@ -67,23 +67,21 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan, node = &v->hlist4; hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni)); } -out: - spin_unlock(&vn->sock_lock); } void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan, struct vxlan_sock *vs, bool ipv6) { - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); struct vxlan_vni_node *v, *tmp; struct vxlan_dev_node *node; + ASSERT_RTNL(); + if (!vg) return; - spin_lock(&vn->sock_lock); list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { #if IS_ENABLED(CONFIG_IPV6) if (ipv6) @@ -94,26 +92,24 @@ void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan, node->vxlan = vxlan; hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni)); } - spin_unlock(&vn->sock_lock); } void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan) { struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp); - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_vni_node *v, *tmp; + ASSERT_RTNL(); + if (!vg) return; - spin_lock(&vn->sock_lock); list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) { hlist_del_init_rcu(&v->hlist4.hlist); #if IS_ENABLED(CONFIG_IPV6) hlist_del_init_rcu(&v->hlist6.hlist); #endif } - spin_unlock(&vn->sock_lock); } 
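
All of the vxlan hunks above are one conversion: the per-namespace sock_lock spinlock is removed, and the RCU-protected socket and VNI hash lists are now mutated only while the RTNL lock is held (enforced with ASSERT_RTNL()), while the receive path keeps traversing them under rcu_read_lock(). A minimal sketch of that pattern follows; the names (my_node, my_head, my_add, my_del, my_find) are illustrative and do not come from the driver:

	/* Sketch: RTNL as the writer-side lock for an RCU hash list.
	 * Writers add/remove entries only with rtnl_lock() held;
	 * readers traverse locklessly inside an RCU read-side section.
	 */
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>
	#include <linux/slab.h>

	struct my_node {
		struct hlist_node hlist;
		struct rcu_head rcu;
		int val;
	};

	static HLIST_HEAD(my_head);	/* writers serialized by RTNL */

	static void my_add(struct my_node *n)
	{
		ASSERT_RTNL();		/* caller must hold rtnl_lock() */
		hlist_add_head_rcu(&n->hlist, &my_head);
	}

	static void my_del(struct my_node *n)
	{
		ASSERT_RTNL();
		hlist_del_init_rcu(&n->hlist);
		kfree_rcu(n, rcu);	/* free only after a grace period */
	}

	static bool my_find(int val)
	{
		struct my_node *n;
		bool found = false;

		rcu_read_lock();	/* no extra lock needed on this side */
		hlist_for_each_entry_rcu(n, &my_head, hlist) {
			if (n->val == val) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

This is also why __vxlan_sock_add() can swap spin_lock(&vn->sock_lock) for rcu_read_lock() around vxlan_find_sock(): the refcount_inc_not_zero() on the found socket fails safely if a concurrent RTNL-side __vxlan_sock_release_prep() has already dropped the last reference and unhashed it.
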
static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode, diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 0414d7a6ce74..253488f8c00f 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -84,7 +84,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, skb->ignore_df = 1; udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, - fl.fl4_dport, false, false); + fl.fl4_dport, false, false, 0); goto out; err: @@ -151,7 +151,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, skb->ignore_df = 1; udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, ip6_dst_hoplimit(dst), 0, fl.fl6_sport, - fl.fl6_dport, false); + fl.fl6_dport, false, 0); goto out; err: diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index ebc0560d40e3..89ae80934b30 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1216,6 +1216,7 @@ void ath12k_fw_stats_init(struct ath12k *ar) INIT_LIST_HEAD(&ar->fw_stats.pdevs); INIT_LIST_HEAD(&ar->fw_stats.bcn); init_completion(&ar->fw_stats_complete); + init_completion(&ar->fw_stats_done); } void ath12k_fw_stats_free(struct ath12k_fw_stats *stats) @@ -1228,8 +1229,9 @@ void ath12k_fw_stats_free(struct ath12k_fw_stats *stats) void ath12k_fw_stats_reset(struct ath12k *ar) { spin_lock_bh(&ar->data_lock); - ar->fw_stats.fw_stats_done = false; ath12k_fw_stats_free(&ar->fw_stats); + ar->fw_stats.num_vdev_recvd = 0; + ar->fw_stats.num_bcn_recvd = 0; spin_unlock_bh(&ar->data_lock); } diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 941db6e49d6e..7bcd9c70309f 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -601,6 +601,12 @@ struct ath12k_sta { #define ATH12K_NUM_CHANS 101 #define ATH12K_MAX_5GHZ_CHAN 173 +static inline bool ath12k_is_2ghz_channel_freq(u32 freq) +{ + return freq >= ATH12K_MIN_2GHZ_FREQ && + freq <= ATH12K_MAX_2GHZ_FREQ; +} + enum ath12k_hw_state { ATH12K_HW_STATE_OFF, ATH12K_HW_STATE_ON, @@ -626,7 +632,8 @@ struct ath12k_fw_stats { struct list_head pdevs; struct list_head vdevs; struct list_head bcn; - bool fw_stats_done; + u32 num_vdev_recvd; + u32 num_bcn_recvd; }; struct ath12k_dbg_htt_stats { @@ -806,6 +813,7 @@ struct ath12k { bool regdom_set_by_user; struct completion fw_stats_complete; + struct completion fw_stats_done; struct completion mlo_setup_done; u32 mlo_setup_status; diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index dd624d73b8b2..23da93afaa5c 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -1251,64 +1251,6 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) */ } -void -ath12k_debugfs_fw_stats_process(struct ath12k *ar, - struct ath12k_fw_stats *stats) -{ - struct ath12k_base *ab = ar->ab; - struct ath12k_pdev *pdev; - bool is_end; - static unsigned int num_vdev, num_bcn; - size_t total_vdevs_started = 0; - int i; - - if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { - if (list_empty(&stats->vdevs)) { - ath12k_warn(ab, "empty vdev stats"); - return; - } - /* FW sends all the active VDEV stats irrespective of PDEV, - * hence limit until the count of all VDEVs started - */ - rcu_read_lock(); - for (i = 0; i < ab->num_radios; i++) { - pdev = rcu_dereference(ab->pdevs_active[i]); - if (pdev && 
pdev->ar) - total_vdevs_started += pdev->ar->num_started_vdevs; - } - rcu_read_unlock(); - - is_end = ((++num_vdev) == total_vdevs_started); - - list_splice_tail_init(&stats->vdevs, - &ar->fw_stats.vdevs); - - if (is_end) { - ar->fw_stats.fw_stats_done = true; - num_vdev = 0; - } - return; - } - if (stats->stats_id == WMI_REQUEST_BCN_STAT) { - if (list_empty(&stats->bcn)) { - ath12k_warn(ab, "empty beacon stats"); - return; - } - /* Mark end until we reached the count of all started VDEVs - * within the PDEV - */ - is_end = ((++num_bcn) == ar->num_started_vdevs); - - list_splice_tail_init(&stats->bcn, - &ar->fw_stats.bcn); - - if (is_end) { - ar->fw_stats.fw_stats_done = true; - num_bcn = 0; - } - } -} - static int ath12k_open_vdev_stats(struct inode *inode, struct file *file) { struct ath12k *ar = inode->i_private; diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h index ebef7dace344..21641a8a0346 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.h +++ b/drivers/net/wireless/ath/ath12k/debugfs.h @@ -12,8 +12,6 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab); void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); void ath12k_debugfs_register(struct ath12k *ar); void ath12k_debugfs_unregister(struct ath12k *ar); -void ath12k_debugfs_fw_stats_process(struct ath12k *ar, - struct ath12k_fw_stats *stats); void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void ath12k_debugfs_pdev_create(struct ath12k_base *ab); @@ -126,11 +124,6 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar) { } -static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar, - struct ath12k_fw_stats *stats) -{ -} - static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar) { return false; diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 88b59f3ff87a..59ec422992d3 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -4360,7 +4360,7 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_hw *ah = ath12k_ar_to_ah(ar); - unsigned long timeout, time_left; + unsigned long time_left; int ret; guard(mutex)(&ah->hw_mutex); @@ -4368,19 +4368,13 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar, if (ah->state != ATH12K_HW_STATE_ON) return -ENETDOWN; - /* FW stats can get split when exceeding the stats data buffer limit. - * In that case, since there is no end marking for the back-to-back - * received 'update stats' event, we keep a 3 seconds timeout in case, - * fw_stats_done is not marked yet - */ - timeout = jiffies + msecs_to_jiffies(3 * 1000); ath12k_fw_stats_reset(ar); reinit_completion(&ar->fw_stats_complete); + reinit_completion(&ar->fw_stats_done); ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id, param->vdev_id, param->pdev_id); - if (ret) { ath12k_warn(ab, "failed to request fw stats: %d\n", ret); return ret; @@ -4391,7 +4385,6 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar, param->pdev_id, param->vdev_id, param->stats_id); time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); - if (!time_left) { ath12k_warn(ab, "time out while waiting for get fw stats\n"); return -ETIMEDOUT; @@ -4400,20 +4393,15 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar, /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back * when stats data buffer limit is reached. 
fw_stats_complete * is completed once host receives first event from firmware, but - * still end might not be marked in the TLV. - * Below loop is to confirm that firmware completed sending all the event - * and fw_stats_done is marked true when end is marked in the TLV. + * still there could be more events following. Below is to wait + * until firmware completes sending all the events. */ - for (;;) { - if (time_after(jiffies, timeout)) - break; - spin_lock_bh(&ar->data_lock); - if (ar->fw_stats.fw_stats_done) { - spin_unlock_bh(&ar->data_lock); - break; - } - spin_unlock_bh(&ar->data_lock); + time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); + if (!time_left) { + ath12k_warn(ab, "time out while waiting for fw stats done\n"); + return -ETIMEDOUT; } + return 0; } @@ -5890,6 +5878,327 @@ exit: return ret; } +static bool ath12k_mac_is_freq_on_mac(struct ath12k_hw_mode_freq_range_arg *freq_range, + u32 freq, u8 mac_id) +{ + return (freq >= freq_range[mac_id].low_2ghz_freq && + freq <= freq_range[mac_id].high_2ghz_freq) || + (freq >= freq_range[mac_id].low_5ghz_freq && + freq <= freq_range[mac_id].high_5ghz_freq); +} + +static bool +ath12k_mac_2_freq_same_mac_in_freq_range(struct ath12k_base *ab, + struct ath12k_hw_mode_freq_range_arg *freq_range, + u32 freq_link1, u32 freq_link2) +{ + u8 i; + + for (i = 0; i < MAX_RADIOS; i++) { + if (ath12k_mac_is_freq_on_mac(freq_range, freq_link1, i) && + ath12k_mac_is_freq_on_mac(freq_range, freq_link2, i)) + return true; + } + + return false; +} + +static bool ath12k_mac_is_hw_dbs_capable(struct ath12k_base *ab) +{ + return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, + ab->wmi_ab.svc_map) && + ab->wmi_ab.hw_mode_info.support_dbs; +} + +static bool ath12k_mac_2_freq_same_mac_in_dbs(struct ath12k_base *ab, + u32 freq_link1, u32 freq_link2) +{ + struct ath12k_hw_mode_freq_range_arg *freq_range; + + if (!ath12k_mac_is_hw_dbs_capable(ab)) + return true; + + freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[ATH12K_HW_MODE_DBS]; + return ath12k_mac_2_freq_same_mac_in_freq_range(ab, freq_range, + freq_link1, freq_link2); +} + +static bool ath12k_mac_is_hw_sbs_capable(struct ath12k_base *ab) +{ + return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, + ab->wmi_ab.svc_map) && + ab->wmi_ab.hw_mode_info.support_sbs; +} + +static bool ath12k_mac_2_freq_same_mac_in_sbs(struct ath12k_base *ab, + u32 freq_link1, u32 freq_link2) +{ + struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *sbs_uppr_share; + struct ath12k_hw_mode_freq_range_arg *sbs_low_share; + struct ath12k_hw_mode_freq_range_arg *sbs_range; + + if (!ath12k_mac_is_hw_sbs_capable(ab)) + return true; + + if (ab->wmi_ab.sbs_lower_band_end_freq) { + sbs_uppr_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE]; + sbs_low_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE]; + + return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_low_share, + freq_link1, freq_link2) || + ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_uppr_share, + freq_link1, freq_link2); + } + + sbs_range = info->freq_range_caps[ATH12K_HW_MODE_SBS]; + return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_range, + freq_link1, freq_link2); +} + +static bool ath12k_mac_freqs_on_same_mac(struct ath12k_base *ab, + u32 freq_link1, u32 freq_link2) +{ + return ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_link1, freq_link2) && + ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_link1, freq_link2); +} + +static int 
ath12k_mac_mlo_sta_set_link_active(struct ath12k_base *ab, + enum wmi_mlo_link_force_reason reason, + enum wmi_mlo_link_force_mode mode, + u8 *mlo_vdev_id_lst, + u8 num_mlo_vdev, + u8 *mlo_inactive_vdev_lst, + u8 num_mlo_inactive_vdev) +{ + struct wmi_mlo_link_set_active_arg param = {0}; + u32 entry_idx, entry_offset, vdev_idx; + u8 vdev_id; + + param.reason = reason; + param.force_mode = mode; + + for (vdev_idx = 0; vdev_idx < num_mlo_vdev; vdev_idx++) { + vdev_id = mlo_vdev_id_lst[vdev_idx]; + entry_idx = vdev_id / 32; + entry_offset = vdev_id % 32; + if (entry_idx >= WMI_MLO_LINK_NUM_SZ) { + ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d", + entry_idx, num_mlo_vdev, vdev_id); + return -EINVAL; + } + param.vdev_bitmap[entry_idx] |= 1 << entry_offset; + /* update entry number if entry index changed */ + if (param.num_vdev_bitmap < entry_idx + 1) + param.num_vdev_bitmap = entry_idx + 1; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "num_vdev_bitmap %d vdev_bitmap[0] = 0x%x, vdev_bitmap[1] = 0x%x", + param.num_vdev_bitmap, param.vdev_bitmap[0], param.vdev_bitmap[1]); + + if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) { + for (vdev_idx = 0; vdev_idx < num_mlo_inactive_vdev; vdev_idx++) { + vdev_id = mlo_inactive_vdev_lst[vdev_idx]; + entry_idx = vdev_id / 32; + entry_offset = vdev_id % 32; + if (entry_idx >= WMI_MLO_LINK_NUM_SZ) { + ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d", + entry_idx, num_mlo_inactive_vdev, vdev_id); + return -EINVAL; + } + param.inactive_vdev_bitmap[entry_idx] |= 1 << entry_offset; + /* update entry number if entry index changed */ + if (param.num_inactive_vdev_bitmap < entry_idx + 1) + param.num_inactive_vdev_bitmap = entry_idx + 1; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "num_vdev_bitmap %d inactive_vdev_bitmap[0] = 0x%x, inactive_vdev_bitmap[1] = 0x%x", + param.num_inactive_vdev_bitmap, + param.inactive_vdev_bitmap[0], + param.inactive_vdev_bitmap[1]); + } + + if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM || + mode == WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM) { + param.num_link_entry = 1; + param.link_num[0].num_of_link = num_mlo_vdev - 1; + } + + return ath12k_wmi_send_mlo_link_set_active_cmd(ab, ¶m); +} + +static int ath12k_mac_mlo_sta_update_link_active(struct ath12k_base *ab, + struct ieee80211_hw *hw, + struct ath12k_vif *ahvif) +{ + u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {0}; + u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {0}; + unsigned long links = ahvif->links_map; + enum wmi_mlo_link_force_reason reason; + struct ieee80211_chanctx_conf *conf; + enum wmi_mlo_link_force_mode mode; + struct ieee80211_bss_conf *info; + struct ath12k_link_vif *arvif; + u8 num_mlo_vdev = 0; + u8 link_id; + + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + /* make sure vdev is created on this device */ + if (!arvif || !arvif->is_created || arvif->ar->ab != ab) + continue; + + info = ath12k_mac_get_link_bss_conf(arvif); + conf = wiphy_dereference(hw->wiphy, info->chanctx_conf); + mlo_freq_list[num_mlo_vdev] = conf->def.chan->center_freq; + + mlo_vdev_id_lst[num_mlo_vdev] = arvif->vdev_id; + num_mlo_vdev++; + } + + /* It is not allowed to activate more links than a single device + * supported. Something goes wrong if we reach here. 
+ */ + if (num_mlo_vdev > ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + /* if 2 links are established and both link channels fall on the + * same hardware MAC, send command to firmware to deactivate one + * of them. + */ + if (num_mlo_vdev == 2 && + ath12k_mac_freqs_on_same_mac(ab, mlo_freq_list[0], + mlo_freq_list[1])) { + mode = WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM; + reason = WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT; + return ath12k_mac_mlo_sta_set_link_active(ab, reason, mode, + mlo_vdev_id_lst, num_mlo_vdev, + NULL, 0); + } + + return 0; +} + +static bool ath12k_mac_are_sbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2) +{ + if (!ath12k_mac_is_hw_sbs_capable(ab)) + return false; + + if (ath12k_is_2ghz_channel_freq(freq_1) || + ath12k_is_2ghz_channel_freq(freq_2)) + return false; + + return !ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_1, freq_2); +} + +static bool ath12k_mac_are_dbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2) +{ + if (!ath12k_mac_is_hw_dbs_capable(ab)) + return false; + + return !ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_1, freq_2); +} + +static int ath12k_mac_select_links(struct ath12k_base *ab, + struct ieee80211_vif *vif, + struct ieee80211_hw *hw, + u16 *selected_links) +{ + unsigned long useful_links = ieee80211_vif_usable_links(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + u8 num_useful_links = hweight_long(useful_links); + struct ieee80211_chanctx_conf *chanctx; + struct ath12k_link_vif *assoc_arvif; + u32 assoc_link_freq, partner_freq; + u16 sbs_links = 0, dbs_links = 0; + struct ieee80211_bss_conf *info; + struct ieee80211_channel *chan; + struct ieee80211_sta *sta; + struct ath12k_sta *ahsta; + u8 link_id; + + /* activate all useful links if less than max supported */ + if (num_useful_links <= ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) { + *selected_links = useful_links; + return 0; + } + + /* only in station mode we can get here, so it's safe + * to use ap_addr + */ + rcu_read_lock(); + sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); + if (!sta) { + rcu_read_unlock(); + ath12k_warn(ab, "failed to find sta with addr %pM\n", vif->cfg.ap_addr); + return -EINVAL; + } + + ahsta = ath12k_sta_to_ahsta(sta); + assoc_arvif = wiphy_dereference(hw->wiphy, ahvif->link[ahsta->assoc_link_id]); + info = ath12k_mac_get_link_bss_conf(assoc_arvif); + chanctx = rcu_dereference(info->chanctx_conf); + assoc_link_freq = chanctx->def.chan->center_freq; + rcu_read_unlock(); + ath12k_dbg(ab, ATH12K_DBG_MAC, "assoc link %u freq %u\n", + assoc_arvif->link_id, assoc_link_freq); + + /* assoc link is already activated and has to be kept active, + * only need to select a partner link from others. 
+ */ + useful_links &= ~BIT(assoc_arvif->link_id); + for_each_set_bit(link_id, &useful_links, IEEE80211_MLD_MAX_NUM_LINKS) { + info = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); + if (!info) { + ath12k_warn(ab, "failed to get link info for link: %u\n", + link_id); + return -ENOLINK; + } + + chan = info->chanreq.oper.chan; + if (!chan) { + ath12k_warn(ab, "failed to get chan for link: %u\n", link_id); + return -EINVAL; + } + + partner_freq = chan->center_freq; + if (ath12k_mac_are_sbs_chan(ab, assoc_link_freq, partner_freq)) { + sbs_links |= BIT(link_id); + ath12k_dbg(ab, ATH12K_DBG_MAC, "new SBS link %u freq %u\n", + link_id, partner_freq); + continue; + } + + if (ath12k_mac_are_dbs_chan(ab, assoc_link_freq, partner_freq)) { + dbs_links |= BIT(link_id); + ath12k_dbg(ab, ATH12K_DBG_MAC, "new DBS link %u freq %u\n", + link_id, partner_freq); + continue; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, "non DBS/SBS link %u freq %u\n", + link_id, partner_freq); + } + + /* choose the first candidate no matter how many is in the list */ + if (sbs_links) + link_id = __ffs(sbs_links); + else if (dbs_links) + link_id = __ffs(dbs_links); + else + link_id = ffs(useful_links) - 1; + + ath12k_dbg(ab, ATH12K_DBG_MAC, "select partner link %u\n", link_id); + + *selected_links = BIT(assoc_arvif->link_id) | BIT(link_id); + + return 0; +} + static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -5899,10 +6208,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_base *prev_ab = NULL, *ab; struct ath12k_link_vif *arvif; struct ath12k_link_sta *arsta; unsigned long valid_links; - u8 link_id = 0; + u16 selected_links = 0; + u8 link_id = 0, i; + struct ath12k *ar; int ret; lockdep_assert_wiphy(hw->wiphy); @@ -5972,8 +6284,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, * about to move to the associated state. */ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION && - old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) - ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif)); + old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { + /* TODO: for now only do link selection for single device + * MLO case. Other cases would be handled in the future. 
+ */ + ab = ah->radio[0].ab; + if (ab->ag->num_devices == 1) { + ret = ath12k_mac_select_links(ab, vif, hw, &selected_links); + if (ret) { + ath12k_warn(ab, + "failed to get selected links: %d\n", ret); + goto exit; + } + } else { + selected_links = ieee80211_vif_usable_links(vif); + } + + ieee80211_set_active_links(vif, selected_links); + } /* Handle all the other state transitions in generic way */ valid_links = ahsta->links_map; @@ -5997,6 +6325,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, } } + if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION && + old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { + for_each_ar(ah, ar, i) { + ab = ar->ab; + if (prev_ab == ab) + continue; + + ret = ath12k_mac_mlo_sta_update_link_active(ab, hw, ahvif); + if (ret) { + ath12k_warn(ab, + "failed to update link active state on connect %d\n", + ret); + goto exit; + } + + prev_ab = ab; + } + } /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: * Remove the station from driver (handle ML sta here since that * needs special handling. Normal sta will be handled in generic diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index e6e74b45bfa4..cc81b1f5680f 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -54,6 +54,8 @@ struct ath12k_generic_iter { #define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS #define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1) +#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2 + enum ath12k_supported_bw { ATH12K_BW_20 = 0, ATH12K_BW_40 = 1, diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 60e2444fe08c..465f877fc0fb 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -91,6 +91,11 @@ struct ath12k_wmi_svc_rdy_ext2_parse { bool dma_ring_cap_done; bool spectral_bin_scaling_done; bool mac_phy_caps_ext_done; + bool hal_reg_caps_ext2_done; + bool scan_radio_caps_ext2_done; + bool twt_caps_done; + bool htt_msdu_idx_to_qtype_map_done; + bool dbs_or_sbs_cap_ext_done; }; struct ath12k_wmi_rdy_parse { @@ -4395,6 +4400,7 @@ static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc, static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc, u16 len, const void *ptr, void *data) { + struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info; struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps; enum wmi_host_hw_mode_config_type mode, pref; @@ -4427,8 +4433,11 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc, } } - ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n", - soc->wmi_ab.preferred_hw_mode); + svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps; + + ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n", + svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode); + if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX) return -EINVAL; @@ -4658,6 +4667,65 @@ free_dir_buff: return ret; } +static void +ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab, + const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap, + struct ath12k_svc_ext_mac_phy_info *mac_phy_info) +{ + mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id); + mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands); + mac_phy_info->hw_freq_range.low_2ghz_freq = + __le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq); + 
mac_phy_info->hw_freq_range.high_2ghz_freq = + __le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq); + mac_phy_info->hw_freq_range.low_5ghz_freq = + __le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq); + mac_phy_info->hw_freq_range.high_5ghz_freq = + __le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq); +} + +static void +ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab, + struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext) +{ + struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info; + const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap; + const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap; + struct ath12k_svc_ext_mac_phy_info *mac_phy_info; + u32 hw_mode_id, phy_bit_map; + u8 hw_idx; + + mac_phy_info = &svc_ext_info->mac_phy_info[0]; + mac_phy_cap = svc_rdy_ext->mac_phy_caps; + + for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) { + hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx]; + hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id); + phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map); + + while (phy_bit_map) { + ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info); + mac_phy_info->hw_mode_config_type = + le32_get_bits(hw_mode_cap->hw_mode_config_type, + WMI_HW_MODE_CAP_CFG_TYPE); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n", + hw_idx, hw_mode_id, + mac_phy_info->hw_mode_config_type, + mac_phy_info->supported_bands, mac_phy_info->phy_id, + mac_phy_info->hw_freq_range.low_2ghz_freq, + mac_phy_info->hw_freq_range.high_2ghz_freq, + mac_phy_info->hw_freq_range.low_5ghz_freq, + mac_phy_info->hw_freq_range.high_5ghz_freq); + + mac_phy_cap++; + mac_phy_info++; + + phy_bit_map >>= 1; + } + } +} + static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *ptr, void *data) @@ -4706,6 +4774,8 @@ static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab, return ret; } + ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext); + svc_rdy_ext->mac_phy_done = true; } else if (!svc_rdy_ext->ext_hal_reg_done) { ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); @@ -4922,10 +4992,449 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, return 0; } +static void +ath12k_wmi_update_freq_info(struct ath12k_base *ab, + struct ath12k_svc_ext_mac_phy_info *mac_cap, + enum ath12k_hw_mode mode, + u32 phy_id) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *mac_range; + + mac_range = &hw_mode_info->freq_range_caps[mode][phy_id]; + + if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { + mac_range->low_2ghz_freq = max_t(u32, + mac_cap->hw_freq_range.low_2ghz_freq, + ATH12K_MIN_2GHZ_FREQ); + mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ? + min_t(u32, + mac_cap->hw_freq_range.high_2ghz_freq, + ATH12K_MAX_2GHZ_FREQ) : + ATH12K_MAX_2GHZ_FREQ; + } + + if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) { + mac_range->low_5ghz_freq = max_t(u32, + mac_cap->hw_freq_range.low_5ghz_freq, + ATH12K_MIN_5GHZ_FREQ); + mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ? 
+ min_t(u32, + mac_cap->hw_freq_range.high_5ghz_freq, + ATH12K_MAX_6GHZ_FREQ) : + ATH12K_MAX_6GHZ_FREQ; + } +} + +static bool +ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab, + enum ath12k_hw_mode hwmode) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *mac_range; + u8 phy_id; + + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id]; + /* modify SBS/DBS range only when both phy for DBS are filled */ + if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq) + return false; + } + + return true; +} + +static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *mac_range; + u8 phy_id; + + mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS]; + /* Reset 5 GHz range for shared mac for DBS */ + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + if (mac_range[phy_id].low_2ghz_freq && + mac_range[phy_id].low_5ghz_freq) { + mac_range[phy_id].low_5ghz_freq = 0; + mac_range[phy_id].high_5ghz_freq = 0; + } + } +} + +static u32 +ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) +{ + u32 highest_freq = 0; + u8 phy_id; + + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + if (range[phy_id].high_5ghz_freq > highest_freq) + highest_freq = range[phy_id].high_5ghz_freq; + } + + return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ; +} + +static u32 +ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) +{ + u32 lowest_freq = 0; + u8 phy_id; + + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + if ((!lowest_freq && range[phy_id].low_5ghz_freq) || + range[phy_id].low_5ghz_freq < lowest_freq) + lowest_freq = range[phy_id].low_5ghz_freq; + } + + return lowest_freq ? 
lowest_freq : ATH12K_MIN_5GHZ_FREQ; +} + +static void +ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab, + u16 sbs_range_sep, + struct ath12k_hw_mode_freq_range_arg *ref_freq) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range; + u8 phy_id; + + upper_sbs_freq_range = + hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE]; + + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + upper_sbs_freq_range[phy_id].low_2ghz_freq = + ref_freq[phy_id].low_2ghz_freq; + upper_sbs_freq_range[phy_id].high_2ghz_freq = + ref_freq[phy_id].high_2ghz_freq; + + /* update for shared mac */ + if (upper_sbs_freq_range[phy_id].low_2ghz_freq) { + upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; + upper_sbs_freq_range[phy_id].high_5ghz_freq = + ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); + } else { + upper_sbs_freq_range[phy_id].low_5ghz_freq = + ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); + upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; + } + } +} + +static void +ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab, + u16 sbs_range_sep, + struct ath12k_hw_mode_freq_range_arg *ref_freq) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range; + u8 phy_id; + + lower_sbs_freq_range = + hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE]; + + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + lower_sbs_freq_range[phy_id].low_2ghz_freq = + ref_freq[phy_id].low_2ghz_freq; + lower_sbs_freq_range[phy_id].high_2ghz_freq = + ref_freq[phy_id].high_2ghz_freq; + + /* update for shared mac */ + if (lower_sbs_freq_range[phy_id].low_2ghz_freq) { + lower_sbs_freq_range[phy_id].low_5ghz_freq = + ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); + lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; + } else { + lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; + lower_sbs_freq_range[phy_id].high_5ghz_freq = + ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); + } + } +} + +static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode) +{ + static const char * const mode_str[] = { + [ATH12K_HW_MODE_SMM] = "SMM", + [ATH12K_HW_MODE_DBS] = "DBS", + [ATH12K_HW_MODE_SBS] = "SBS", + [ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE", + [ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE", + }; + + if (hw_mode >= ARRAY_SIZE(mode_str)) + return "Unknown"; + + return mode_str[hw_mode]; +} + +static void +ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab, + struct ath12k_hw_mode_freq_range_arg *freq_range, + enum ath12k_hw_mode hw_mode) +{ + u8 i; + + for (i = 0; i < MAX_RADIOS; i++) + if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq) + ath12k_dbg(ab, ATH12K_DBG_WMI, + "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]", + ath12k_wmi_hw_mode_to_str(hw_mode), + hw_mode, i, + freq_range[i].low_2ghz_freq, + freq_range[i].high_2ghz_freq, + freq_range[i].low_5ghz_freq, + freq_range[i].high_5ghz_freq); +} + +static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab) +{ + struct ath12k_hw_mode_freq_range_arg *freq_range; + u8 i; + + for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) { + freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i]; + ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i); + } +} + +static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id) +{ + struct ath12k_hw_mode_info 
*hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range; + struct ath12k_hw_mode_freq_range_arg *non_shared_range; + u8 shared_phy_id; + + sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id]; + + /* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id + * keep the range as it is in SBS + */ + if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq) + return 0; + + if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) { + ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz"); + ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS); + return -EINVAL; + } + + non_shared_range = sbs_mac_range; + /* if SBS mac range has only 5 GHz then it's the non-shared phy, so + * modify the range as per the shared mac. + */ + shared_phy_id = phy_id ? 0 : 1; + shared_mac_range = + &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id]; + + if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) { + ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared"); + /* If the shared mac lower 5 GHz frequency is greater than + * non-shared mac lower 5 GHz frequency then the shared mac has + * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high + * freq should be less than the shared mac's low 5 GHz freq. + */ + if (non_shared_range->high_5ghz_freq >= + shared_mac_range->low_5ghz_freq) + non_shared_range->high_5ghz_freq = + max_t(u32, shared_mac_range->low_5ghz_freq - 10, + non_shared_range->low_5ghz_freq); + } else if (shared_mac_range->high_5ghz_freq < + non_shared_range->high_5ghz_freq) { + ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared"); + /* If the shared mac high 5 GHz frequency is less than + * non-shared mac high 5 GHz frequency then the shared mac has + * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low + * freq should be greater than the shared mac's high 5 GHz freq. + */ + if (shared_mac_range->high_5ghz_freq >= + non_shared_range->low_5ghz_freq) + non_shared_range->low_5ghz_freq = + min_t(u32, shared_mac_range->high_5ghz_freq + 10, + non_shared_range->high_5ghz_freq); + } else { + ath12k_warn(ab, "invalid SBS range with all 5 GHz shared"); + return -EINVAL; + } + + return 0; +} + +static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab) +{ + struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; + struct ath12k_hw_mode_freq_range_arg *mac_range; + u16 sbs_range_sep; + u8 phy_id; + int ret; + + mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS]; + + /* If sbs_lower_band_end_freq has a value, then the frequency range + * will be split using that value. + */ + sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq; + if (sbs_range_sep) { + ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep, + mac_range); + ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep, + mac_range); + /* Hardware specifies the range boundary with sbs_range_sep, + * (i.e. the boundary between 5 GHz high and 5 GHz low), + * reset the original one to make sure it will not get used. + */ + memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS); + return; + } + + /* If sbs_lower_band_end_freq is not set that means firmware will send one + * shared mac range and one non-shared mac range. so update that freq. 
+ */ + for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { + ret = ath12k_wmi_modify_sbs_freq(ab, phy_id); + if (ret) { + memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS); + break; + } + } +} + +static void +ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab, + enum wmi_host_hw_mode_config_type hw_config_type, + u32 phy_id, + struct ath12k_svc_ext_mac_phy_info *mac_cap) +{ + if (phy_id >= MAX_RADIOS) { + ath12k_err(ab, "mac more than two not supported: %d", phy_id); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]", + hw_config_type, phy_id, mac_cap->supported_bands, + ab->wmi_ab.sbs_lower_band_end_freq, + mac_cap->hw_freq_range.low_2ghz_freq, + mac_cap->hw_freq_range.high_2ghz_freq, + mac_cap->hw_freq_range.low_5ghz_freq, + mac_cap->hw_freq_range.high_5ghz_freq); + + switch (hw_config_type) { + case WMI_HOST_HW_MODE_SINGLE: + if (phy_id) { + ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported"); + break; + } + ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id); + break; + + case WMI_HOST_HW_MODE_DBS: + if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS)) + ath12k_wmi_update_freq_info(ab, mac_cap, + ATH12K_HW_MODE_DBS, phy_id); + break; + case WMI_HOST_HW_MODE_DBS_SBS: + case WMI_HOST_HW_MODE_DBS_OR_SBS: + ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id); + if (ab->wmi_ab.sbs_lower_band_end_freq || + mac_cap->hw_freq_range.low_5ghz_freq || + mac_cap->hw_freq_range.low_2ghz_freq) + ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, + phy_id); + + if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS)) + ath12k_wmi_update_dbs_freq_info(ab); + if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS)) + ath12k_wmi_update_sbs_freq_info(ab); + break; + case WMI_HOST_HW_MODE_SBS: + case WMI_HOST_HW_MODE_SBS_PASSIVE: + ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id); + if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS)) + ath12k_wmi_update_sbs_freq_info(ab); + + break; + default: + break; + } +} + +static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab) +{ + if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) || + (ab->wmi_ab.sbs_lower_band_end_freq && + ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) && + ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE))) + return true; + + return false; +} + +static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab) +{ + struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info; + struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info; + enum wmi_host_hw_mode_config_type hw_config_type; + struct ath12k_svc_ext_mac_phy_info *tmp; + bool dbs_mode = false, sbs_mode = false; + u32 i, j = 0; + + if (!svc_ext_info->num_hw_modes) { + ath12k_err(ab, "invalid number of hw modes"); + return -EINVAL; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d", + svc_ext_info->num_hw_modes); + + memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps)); + + for (i = 0; i < svc_ext_info->num_hw_modes; i++) { + if (j >= ATH12K_MAX_MAC_PHY_CAP) + return -EINVAL; + + /* Update for MAC0 */ + tmp = &svc_ext_info->mac_phy_info[j++]; + hw_config_type = tmp->hw_mode_config_type; + ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp); + + /* SBS and DBS have dual MAC. Up to 2 MACs are considered. 
*/ + if (hw_config_type == WMI_HOST_HW_MODE_DBS || + hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || + hw_config_type == WMI_HOST_HW_MODE_SBS || + hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) { + if (j >= ATH12K_MAX_MAC_PHY_CAP) + return -EINVAL; + /* Update for MAC1 */ + tmp = &svc_ext_info->mac_phy_info[j++]; + ath12k_wmi_update_mac_freq_info(ab, hw_config_type, + tmp->phy_id, tmp); + + if (hw_config_type == WMI_HOST_HW_MODE_DBS || + hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) + dbs_mode = true; + + if (ath12k_wmi_sbs_range_present(ab) && + (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || + hw_config_type == WMI_HOST_HW_MODE_SBS || + hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)) + sbs_mode = true; + } + } + + info->support_dbs = dbs_mode; + info->support_sbs = sbs_mode; + + ath12k_wmi_dump_freq_range(ab); + + return 0; +} + static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { + const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps; struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; int ret; @@ -4967,7 +5476,32 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, } parse->mac_phy_caps_ext_done = true; + } else if (!parse->hal_reg_caps_ext2_done) { + parse->hal_reg_caps_ext2_done = true; + } else if (!parse->scan_radio_caps_ext2_done) { + parse->scan_radio_caps_ext2_done = true; + } else if (!parse->twt_caps_done) { + parse->twt_caps_done = true; + } else if (!parse->htt_msdu_idx_to_qtype_map_done) { + parse->htt_msdu_idx_to_qtype_map_done = true; + } else if (!parse->dbs_or_sbs_cap_ext_done) { + dbs_or_sbs_caps = ptr; + ab->wmi_ab.sbs_lower_band_end_freq = + __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq); + + ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n", + ab->wmi_ab.sbs_lower_band_end_freq); + + ret = ath12k_wmi_update_hw_mode_list(ab); + if (ret) { + ath12k_warn(ab, "failed to update hw mode list: %d\n", + ret); + return ret; + } + + parse->dbs_or_sbs_cap_ext_done = true; } + break; default: break; @@ -7626,6 +8160,64 @@ static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb, &parse); } +static void ath12k_wmi_fw_stats_process(struct ath12k *ar, + struct ath12k_fw_stats *stats) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_pdev *pdev; + bool is_end = true; + size_t total_vdevs_started = 0; + int i; + + if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { + if (list_empty(&stats->vdevs)) { + ath12k_warn(ab, "empty vdev stats"); + return; + } + /* FW sends all the active VDEV stats irrespective of PDEV, + * hence limit until the count of all VDEVs started + */ + rcu_read_lock(); + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) + total_vdevs_started += pdev->ar->num_started_vdevs; + } + rcu_read_unlock(); + + if (total_vdevs_started) + is_end = ((++ar->fw_stats.num_vdev_recvd) == + total_vdevs_started); + + list_splice_tail_init(&stats->vdevs, + &ar->fw_stats.vdevs); + + if (is_end) + complete(&ar->fw_stats_done); + + return; + } + + if (stats->stats_id == WMI_REQUEST_BCN_STAT) { + if (list_empty(&stats->bcn)) { + ath12k_warn(ab, "empty beacon stats"); + return; + } + /* Mark end until we reached the count of all started VDEVs + * within the PDEV + */ + if (ar->num_started_vdevs) + is_end = ((++ar->fw_stats.num_bcn_recvd) == + ar->num_started_vdevs); + + list_splice_tail_init(&stats->bcn, + &ar->fw_stats.bcn); + + if 
(is_end) + complete(&ar->fw_stats_done); + } +} + static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) { struct ath12k_fw_stats stats = {}; @@ -7655,19 +8247,15 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk spin_lock_bh(&ar->data_lock); - /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via - * debugfs fw stats. Therefore, processing it separately. - */ + /* Handle WMI_REQUEST_PDEV_STAT status update */ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); - ar->fw_stats.fw_stats_done = true; + complete(&ar->fw_stats_done); goto complete; } - /* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only - * via debugfs fw stats. Hence, processing these in debugfs context. - */ - ath12k_debugfs_fw_stats_process(ar, &stats); + /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */ + ath12k_wmi_fw_stats_process(ar, &stats); complete: complete(&ar->fw_stats_complete); @@ -9911,3 +10499,224 @@ int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar, return 0; } + +static int +ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab, + struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap, + struct wmi_mlo_link_set_active_arg *arg) +{ + struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg; + u8 i; + + if (arg->num_disallow_mode_comb > + ARRAY_SIZE(arg->disallow_bmap)) { + ath12k_warn(ab, "invalid num_disallow_mode_comb: %d", + arg->num_disallow_mode_comb); + return -EINVAL; + } + + dislw_bmap_arg = &arg->disallow_bmap[0]; + for (i = 0; i < arg->num_disallow_mode_comb; i++) { + dislw_bmap->tlv_header = + ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap)); + dislw_bmap->disallowed_mode_bitmap = + cpu_to_le32(dislw_bmap_arg->disallowed_mode); + dislw_bmap->ieee_link_id_comb = + le32_encode_bits(dislw_bmap_arg->ieee_link_id[0], + WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) | + le32_encode_bits(dislw_bmap_arg->ieee_link_id[1], + WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) | + le32_encode_bits(dislw_bmap_arg->ieee_link_id[2], + WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) | + le32_encode_bits(dislw_bmap_arg->ieee_link_id[3], + WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4); + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "entry %d disallowed_mode %d ieee_link_id_comb 0x%x", + i, dislw_bmap_arg->disallowed_mode, + dislw_bmap_arg->ieee_link_id_comb); + dislw_bmap++; + dislw_bmap_arg++; + } + + return 0; +} + +int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab, + struct wmi_mlo_link_set_active_arg *arg) +{ + struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap; + struct wmi_mlo_set_active_link_number_params *link_num_param; + u32 num_link_num_param = 0, num_vdev_bitmap = 0; + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_mlo_link_set_active_cmd *cmd; + u32 num_inactive_vdev_bitmap = 0; + u32 num_disallow_mode_comb = 0; + struct wmi_tlv *tlv; + struct sk_buff *skb; + __le32 *vdev_bitmap; + void *buf_ptr; + int i, ret; + u32 len; + + if (!arg->num_vdev_bitmap && !arg->num_link_entry) { + ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry"); + return -EINVAL; + } + + switch (arg->force_mode) { + case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM: + case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM: + num_link_num_param = arg->num_link_entry; + fallthrough; + case WMI_MLO_LINK_FORCE_MODE_ACTIVE: + case WMI_MLO_LINK_FORCE_MODE_INACTIVE: + case WMI_MLO_LINK_FORCE_MODE_NO_FORCE: + num_vdev_bitmap = 
arg->num_vdev_bitmap; + break; + case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE: + num_vdev_bitmap = arg->num_vdev_bitmap; + num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap; + break; + default: + ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode); + return -EINVAL; + } + + num_disallow_mode_comb = arg->num_disallow_mode_comb; + len = sizeof(*cmd) + + TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param + + TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap + + TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE + + TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb; + if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) + len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD, + sizeof(*cmd)); + cmd->force_mode = cpu_to_le32(arg->force_mode); + cmd->reason = cpu_to_le32(arg->reason); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d", + arg->force_mode, arg->reason, num_link_num_param, + num_vdev_bitmap, num_inactive_vdev_bitmap, + num_disallow_mode_comb); + + buf_ptr = skb->data + sizeof(*cmd); + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + sizeof(*link_num_param) * num_link_num_param); + buf_ptr += TLV_HDR_SIZE; + + if (num_link_num_param) { + cmd->ctrl_flags = + le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0, + CRTL_F_DYNC_FORCE_LINK_NUM); + + link_num_param = buf_ptr; + for (i = 0; i < num_link_num_param; i++) { + link_num_param->tlv_header = + ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param)); + link_num_param->num_of_link = + cpu_to_le32(arg->link_num[i].num_of_link); + link_num_param->vdev_type = + cpu_to_le32(arg->link_num[i].vdev_type); + link_num_param->vdev_subtype = + cpu_to_le32(arg->link_num[i].vdev_subtype); + link_num_param->home_freq = + cpu_to_le32(arg->link_num[i].home_freq); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d", + i, arg->link_num[i].num_of_link, + arg->link_num[i].vdev_type, + arg->link_num[i].vdev_subtype, + arg->link_num[i].home_freq, + __le32_to_cpu(cmd->ctrl_flags)); + link_num_param++; + } + + buf_ptr += sizeof(*link_num_param) * num_link_num_param; + } + + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, + sizeof(*vdev_bitmap) * num_vdev_bitmap); + buf_ptr += TLV_HDR_SIZE; + + if (num_vdev_bitmap) { + vdev_bitmap = buf_ptr; + for (i = 0; i < num_vdev_bitmap; i++) { + vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]); + ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x", + i, arg->vdev_bitmap[i]); + } + + buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap; + } + + if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) { + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, + sizeof(*vdev_bitmap) * + num_inactive_vdev_bitmap); + buf_ptr += TLV_HDR_SIZE; + + if (num_inactive_vdev_bitmap) { + vdev_bitmap = buf_ptr; + for (i = 0; i < num_inactive_vdev_bitmap; i++) { + vdev_bitmap[i] = + cpu_to_le32(arg->inactive_vdev_bitmap[i]); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "entry %d inactive_vdev_id_bitmap 0x%x", + i, arg->inactive_vdev_bitmap[i]); + } + + buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap; + } + } else { + /* add empty vdev bitmap2 tlv */ + tlv 
= buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + buf_ptr += TLV_HDR_SIZE; + } + + /* add empty ieee_link_id_bitmap tlv */ + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + buf_ptr += TLV_HDR_SIZE; + + /* add empty ieee_link_id_bitmap2 tlv */ + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + buf_ptr += TLV_HDR_SIZE; + + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + sizeof(*disallowed_mode_bmap) * + arg->num_disallow_mode_comb); + buf_ptr += TLV_HDR_SIZE; + + ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg); + if (ret) + goto free_skb; + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret); + goto free_skb; + } + + ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd"); + + return ret; + +free_skb: + dev_kfree_skb(skb); + return ret; +} diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index ac18f75e0449..c640ffa180c8 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -1974,6 +1974,7 @@ enum wmi_tlv_tag { WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT, WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5, WMI_TAG_VDEV_CH_POWER_INFO, + WMI_TAG_MLO_LINK_SET_ACTIVE_CMD = 0x3BE, WMI_TAG_EHT_RATE_SET = 0x3C4, WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, WMI_TAG_MLO_TX_SEND_PARAMS, @@ -2617,6 +2618,8 @@ struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params { __le32 num_chainmask_tables; } __packed; +#define WMI_HW_MODE_CAP_CFG_TYPE GENMASK(27, 0) + struct ath12k_wmi_hw_mode_cap_params { __le32 tlv_header; __le32 hw_mode_id; @@ -2666,6 +2669,12 @@ struct ath12k_wmi_mac_phy_caps_params { __le32 he_cap_info_2g_ext; __le32 he_cap_info_5g_ext; __le32 he_cap_info_internal; + __le32 wireless_modes; + __le32 low_2ghz_chan_freq; + __le32 high_2ghz_chan_freq; + __le32 low_5ghz_chan_freq; + __le32 high_5ghz_chan_freq; + __le32 nss_ratio; } __packed; struct ath12k_wmi_hal_reg_caps_ext_params { @@ -2739,6 +2748,11 @@ struct wmi_service_ready_ext2_event { __le32 default_num_msduq_supported_per_tid; } __packed; +struct ath12k_wmi_dbs_or_sbs_cap_params { + __le32 hw_mode_id; + __le32 sbs_lower_band_end_freq; +} __packed; + struct ath12k_wmi_caps_ext_params { __le32 hw_mode_id; __le32 pdev_and_hw_link_ids; @@ -5049,6 +5063,53 @@ struct ath12k_wmi_pdev { u32 rx_decap_mode; }; +struct ath12k_hw_mode_freq_range_arg { + u32 low_2ghz_freq; + u32 high_2ghz_freq; + u32 low_5ghz_freq; + u32 high_5ghz_freq; +}; + +struct ath12k_svc_ext_mac_phy_info { + enum wmi_host_hw_mode_config_type hw_mode_config_type; + u32 phy_id; + u32 supported_bands; + struct ath12k_hw_mode_freq_range_arg hw_freq_range; +}; + +#define ATH12K_MAX_MAC_PHY_CAP 8 + +struct ath12k_svc_ext_info { + u32 num_hw_modes; + struct ath12k_svc_ext_mac_phy_info mac_phy_info[ATH12K_MAX_MAC_PHY_CAP]; +}; + +/** + * enum ath12k_hw_mode - enum for host mode + * @ATH12K_HW_MODE_SMM: Single mac mode + * @ATH12K_HW_MODE_DBS: DBS mode + * @ATH12K_HW_MODE_SBS: SBS mode with either high share or low share + * @ATH12K_HW_MODE_SBS_UPPER_SHARE: Higher 5 GHz shared with 2.4 GHz + * @ATH12K_HW_MODE_SBS_LOWER_SHARE: Lower 5 GHz shared with 2.4 GHz + * @ATH12K_HW_MODE_MAX: Max, used to indicate invalid mode + */ +enum ath12k_hw_mode { + ATH12K_HW_MODE_SMM, + ATH12K_HW_MODE_DBS, + ATH12K_HW_MODE_SBS, + ATH12K_HW_MODE_SBS_UPPER_SHARE, + ATH12K_HW_MODE_SBS_LOWER_SHARE, + 
ATH12K_HW_MODE_MAX, +}; + +struct ath12k_hw_mode_info { + bool support_dbs:1; + bool support_sbs:1; + + struct ath12k_hw_mode_freq_range_arg freq_range_caps[ATH12K_HW_MODE_MAX] + [MAX_RADIOS]; +}; + struct ath12k_wmi_base { struct ath12k_base *ab; struct ath12k_wmi_pdev wmi[MAX_RADIOS]; @@ -5066,6 +5127,10 @@ struct ath12k_wmi_base { enum wmi_host_hw_mode_config_type preferred_hw_mode; struct ath12k_wmi_target_cap_arg *targ_cap; + + struct ath12k_svc_ext_info svc_ext_info; + u32 sbs_lower_band_end_freq; + struct ath12k_hw_mode_info hw_mode_info; }; struct wmi_pdev_set_bios_interface_cmd { @@ -5997,6 +6062,118 @@ struct wmi_vdev_set_tpc_power_cmd { */ } __packed; +#define CRTL_F_DYNC_FORCE_LINK_NUM GENMASK(3, 2) + +struct wmi_mlo_link_set_active_cmd { + __le32 tlv_header; + __le32 force_mode; + __le32 reason; + __le32 use_ieee_link_id_bitmap; + struct ath12k_wmi_mac_addr_params ap_mld_mac_addr; + __le32 ctrl_flags; +} __packed; + +struct wmi_mlo_set_active_link_number_params { + __le32 tlv_header; + __le32 num_of_link; + __le32 vdev_type; + __le32 vdev_subtype; + __le32 home_freq; +} __packed; + +#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1 GENMASK(7, 0) +#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2 GENMASK(15, 8) +#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3 GENMASK(23, 16) +#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4 GENMASK(31, 24) + +struct wmi_disallowed_mlo_mode_bitmap_params { + __le32 tlv_header; + __le32 disallowed_mode_bitmap; + __le32 ieee_link_id_comb; +} __packed; + +enum wmi_mlo_link_force_mode { + WMI_MLO_LINK_FORCE_MODE_ACTIVE = 1, + WMI_MLO_LINK_FORCE_MODE_INACTIVE = 2, + WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM = 3, + WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM = 4, + WMI_MLO_LINK_FORCE_MODE_NO_FORCE = 5, + WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE = 6, + WMI_MLO_LINK_FORCE_MODE_NON_FORCE_UPDATE = 7, +}; + +enum wmi_mlo_link_force_reason { + WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT = 1, + WMI_MLO_LINK_FORCE_REASON_NEW_DISCONNECT = 2, + WMI_MLO_LINK_FORCE_REASON_LINK_REMOVAL = 3, + WMI_MLO_LINK_FORCE_REASON_TDLS = 4, + WMI_MLO_LINK_FORCE_REASON_REVERT_FAILURE = 5, + WMI_MLO_LINK_FORCE_REASON_LINK_DELETE = 6, + WMI_MLO_LINK_FORCE_REASON_SINGLE_LINK_EMLSR_OP = 7, +}; + +struct wmi_mlo_link_num_arg { + u32 num_of_link; + u32 vdev_type; + u32 vdev_subtype; + u32 home_freq; +}; + +struct wmi_mlo_control_flags_arg { + bool overwrite_force_active_bitmap; + bool overwrite_force_inactive_bitmap; + bool dync_force_link_num; + bool post_re_evaluate; + u8 post_re_evaluate_loops; + bool dont_reschedule_workqueue; +}; + +struct wmi_ml_link_force_cmd_arg { + u8 ap_mld_mac_addr[ETH_ALEN]; + u16 ieee_link_id_bitmap; + u16 ieee_link_id_bitmap2; + u8 link_num; +}; + +struct wmi_ml_disallow_mode_bmap_arg { + u32 disallowed_mode; + union { + u32 ieee_link_id_comb; + u8 ieee_link_id[4]; + }; +}; + +/* maximum size of link number param array + * for MLO link set active command + */ +#define WMI_MLO_LINK_NUM_SZ 2 + +/* maximum size of vdev bitmap array for + * MLO link set active command + */ +#define WMI_MLO_VDEV_BITMAP_SZ 2 + +/* Max number of disallowed bitmap combinations + * sent to firmware + */ +#define WMI_ML_MAX_DISALLOW_BMAP_COMB 4 + +struct wmi_mlo_link_set_active_arg { + enum wmi_mlo_link_force_mode force_mode; + enum wmi_mlo_link_force_reason reason; + u32 num_link_entry; + u32 num_vdev_bitmap; + u32 num_inactive_vdev_bitmap; + struct wmi_mlo_link_num_arg link_num[WMI_MLO_LINK_NUM_SZ]; + u32 vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ]; + u32 
inactive_vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ]; + struct wmi_mlo_control_flags_arg ctrl_flags; + bool use_ieee_link_id; + struct wmi_ml_link_force_cmd_arg force_cmd; + u32 num_disallow_mode_comb; + struct wmi_ml_disallow_mode_bmap_arg disallow_bmap[WMI_ML_MAX_DISALLOW_BMAP_COMB]; +}; + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -6195,5 +6372,6 @@ bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar); int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar, u32 vdev_id, struct ath12k_reg_tpc_power_info *param); - +int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab, + struct wmi_mlo_link_set_active_arg *param); #endif diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c index af98e871199d..5a9e93fd1ef4 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, * We need to do some backwards compatibility to make this work. */ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { - WARN_ON(1); + ath6kl_err("mismatched byte count %d vs. expected %zd\n", + le32_to_cpu(targ_info->byte_count), + sizeof(*targ_info)); return -EINVAL; } diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index a3e03580cd9f..564ca6a61985 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb) if (atomic_read(&ar->rx_anch_urbs) == 0) { /* - * The system is too slow to cope with - * the enormous workload. We have simply - * run out of active rx urbs and this - * unfortunately leads to an unpredictable - * device. + * At this point, either the system is too slow to + * cope with the enormous workload (so we have simply + * run out of active rx urbs and this unfortunately + * leads to an unpredictable device), or the device + * is not fully functional after an unsuccessful + * firmware loading attempt (so it doesn't pass + * ieee80211_register_hw() and there is no internal + * workqueue at all). 
*/ - ieee80211_queue_work(ar->hw, &ar->ping_work); + if (ar->registered) + ieee80211_queue_work(ar->hw, &ar->ping_work); + else + pr_warn_once("device %s is not registered\n", + dev_name(&ar->udev->dev)); } } else { /* diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index dbfd45948e8b..66211426aa3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1316,6 +1316,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, sizeof(trans->conf.no_reclaim_cmds)); memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds, sizeof(no_reclaim_cmds)); + trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c index e8820e7cf8fa..1774bb84dd3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -77,6 +77,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, /* Setup async RX handling */ spin_lock_init(&mld->async_handlers_lock); + INIT_LIST_HEAD(&mld->async_handlers_list); wiphy_work_init(&mld->async_handlers_wk, iwl_mld_async_handlers_wk); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c index 81ca9ff67be9..3c255ae916c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c @@ -34,7 +34,7 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), 0); - if (WARN_ON(cmd_ver < 1 && cmd_ver > 3)) + if (WARN_ON(cmd_ver < 1 || cmd_ver > 3)) return; cmd->id_and_color = cpu_to_le32(mvmvif->id); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index cb36baac14da..4f2be0c1bd97 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -166,7 +166,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; - u32 control_flags = 0, rb_size; + u32 control_flags = 0, rb_size, cb_size; dma_addr_t phys; int ret; @@ -202,11 +202,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, rb_size = IWL_CTXT_INFO_RB_SIZE_4K; } - WARN_ON(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)) > 12); + cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)); + if (WARN_ON(cb_size > 12)) + cb_size = 12; + control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG; - control_flags |= - u32_encode_bits(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)), - IWL_CTXT_INFO_RB_CB_SIZE); + control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE); control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE); ctxt_info->control.control_flags = cpu_to_le32(control_flags);
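
A note on the ath12k hunk above: ath12k_wmi_send_mlo_link_set_active_cmd() sizes its skb for six TLV arrays and emits a TLV header even for an empty array so the firmware parser stays aligned. A minimal stand-alone sketch of that length computation follows; the sizes and names here (TLV_HDR_SIZE value, CMD_FIXED_SIZE, etc.) are hypothetical stand-ins for illustration, not the driver's real structs:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real driver derives these from its TLV structs. */
#define TLV_HDR_SIZE          8   /* assumption: tag + length header */
#define CMD_FIXED_SIZE        32  /* assumption: fixed command body */
#define LINK_NUM_PARAM_SIZE   20  /* assumption: per-entry link-num struct */
#define VDEV_BITMAP_WORD_SIZE 4   /* one __le32 per bitmap word */
#define DISALLOW_COMB_SIZE    12  /* assumption: per-entry disallow struct */

/* Mirrors the shape of the driver's len computation: fixed command plus
 * six TLVs (link-num array, vdev bitmap, inactive vdev bitmap, two IEEE
 * link-id bitmaps, disallowed-mode combinations). Only the
 * ACTIVE_INACTIVE force mode carries inactive-bitmap payload; the other
 * modes still emit that TLV header with zero length. */
static size_t mlo_set_active_len(size_t n_link_num, size_t n_vdev,
				 size_t n_inactive, size_t n_disallow,
				 int active_inactive_mode)
{
	size_t len = CMD_FIXED_SIZE
		+ TLV_HDR_SIZE + LINK_NUM_PARAM_SIZE * n_link_num
		+ TLV_HDR_SIZE + VDEV_BITMAP_WORD_SIZE * n_vdev
		+ TLV_HDR_SIZE	/* inactive vdev bitmap (possibly empty) */
		+ TLV_HDR_SIZE	/* ieee_link_id_bitmap (empty) */
		+ TLV_HDR_SIZE	/* ieee_link_id_bitmap2 (empty) */
		+ TLV_HDR_SIZE + DISALLOW_COMB_SIZE * n_disallow;

	if (active_inactive_mode)
		len += VDEV_BITMAP_WORD_SIZE * n_inactive;

	return len;
}

int main(void)
{
	/* One link-num entry, one vdev word, one inactive word, no
	 * disallow combinations, ACTIVE_INACTIVE mode: prints 108. */
	printf("len = %zu\n", mlo_set_active_len(1, 1, 1, 0, 1));
	return 0;
}

With these stand-in sizes the sample prints len = 108; the point is the bookkeeping, not the constants: every array, empty or not, costs one TLV header, and the inactive-bitmap payload is conditional on the force mode.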

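Similarly, the iwlwifi pcie/ctxt-info.c hunk above replaces a bare WARN_ON with a clamp before the value is packed into a bit field of control_flags. The idea is easiest to see in isolation: encode only a value known to fit the field, instead of warning and encoding an out-of-range value anyway. A small self-contained illustration, with a made-up field mask and a userspace stand-in for the kernel's u32_encode_bits() (not the real IWL_CTXT_INFO_* layout):

#include <stdio.h>
#include <stdint.h>

/* Made-up 4-bit field at bits 7..4; the real driver builds its masks
 * with GENMASK() and packs them with u32_encode_bits(). */
#define RB_CB_SIZE_MASK 0x000000f0u

/* Minimal stand-in for u32_encode_bits(): shift the value to the
 * field's position and mask off anything that doesn't fit. */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t cb_size = 13;	/* pretend RX_QUEUE_CB_SIZE() came back too big */
	uint32_t control_flags = 0;

	/* Mirror of the fix: clamp to the maximum the hardware field
	 * supports before encoding, rather than only warning and then
	 * handing the device an out-of-range value. */
	if (cb_size > 12) {
		fprintf(stderr, "cb_size %u too large, clamping to 12\n",
			cb_size);
		cb_size = 12;
	}
	control_flags |= encode_bits(cb_size, RB_CB_SIZE_MASK);

	printf("control_flags = 0x%08x\n", control_flags);
	return 0;
}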